diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 13ceef4bdd7..108c6ba9765 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -338,8 +338,8 @@ time to setup all the VMs one at a time. Run this to download and setup the VMs
 we use for testing by default:
 
 --------------------------------------------------------
-vagrant up --provision trusty && vagrant halt trusty
-vagrant up --provision centos-7 && vagrant halt centos-7
+vagrant up --provision trusty --provider virtualbox && vagrant halt trusty
+vagrant up --provision centos-7 --provider virtualbox && vagrant halt centos-7
 --------------------------------------------------------
 
 or run this to download and setup all the VMs:
@@ -347,7 +347,7 @@ or run this to download and setup all the VMs:
 -------------------------------------------------------------------------------
 vagrant halt
 for box in $(vagrant status | grep 'poweroff\|not created' | cut -f1 -d' '); do
-  vagrant up --provision $box
+  vagrant up --provision $box --provider virtualbox
   vagrant halt $box
 done
 -------------------------------------------------------------------------------
@@ -420,13 +420,13 @@ This is just regular vagrant so you can run normal multi box vagrant commands
 to test things manually. Just run:
 
 ---------------------------------------
-vagrant up trusty && vagrant ssh trusty
+vagrant up trusty --provider virtualbox && vagrant ssh trusty
 ---------------------------------------
 
 to get an Ubuntu or
 
 -------------------------------------------
-vagrant up centos-7 && vagrant ssh centos-7
+vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
 -------------------------------------------
 
 to get a CentOS. Once you are done with them you should halt them:
@@ -469,7 +469,7 @@ vagrant ssh precise -c 'sudo rm -rf /bin'; echo oops
 All you've got to do to get another one is
 
 ----------------------------------------------
-vagrant destroy -f trusty && vagrant up trusty
+vagrant destroy -f trusty && vagrant up trusty --provider virtualbox
 ----------------------------------------------
 
 The whole process takes a minute and a half on a modern laptop, two and a half
@@ -508,7 +508,7 @@ mvn -pl distribution/rpm package
 and in another window:
 
 ----------------------------------------------------
-vagrant up centos-7 && vagrant ssh centos-7
+vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
 cd $RPM
 sudo bats $BATS/*rpm*.bats
 ----------------------------------------------------
@@ -520,20 +520,34 @@ If you wanted to retest all the release artifacts on a single VM you could:
 mvn -amd -pl distribution install -DskipTests
 # Copy them all the testroot
 mvn -Dtests.vagrant -pl qa/vagrant pre-integration-test
-vagrant up trusty && vagrant ssh trusty
+vagrant up trusty --provider virtualbox && vagrant ssh trusty
 cd $TESTROOT
 sudo bats $BATS/*.bats
 -------------------------------------------------
 
 == Coverage analysis
 
-To run tests instrumented with jacoco and produce a coverage report in
-`target/site/jacoco/`:
+Tests can be run instrumented with jacoco to produce a coverage report in
+`target/site/jacoco/`.
+
+Unit test coverage:
 
 ---------------------------------------------------------------------------
 mvn -Dtests.coverage test jacoco:report
 ---------------------------------------------------------------------------
+Integration test coverage:
+
+---------------------------------------------------------------------------
+mvn -Dtests.coverage -Dskip.unit.tests verify jacoco:report
+---------------------------------------------------------------------------
+
+Combined (Unit+Integration) coverage:
+
+---------------------------------------------------------------------------
+mvn -Dtests.coverage verify jacoco:report
+---------------------------------------------------------------------------
+
 
 == Debugging from an IDE
 
 If you want to run elasticsearch from your IDE, you should execute ./run.sh
diff --git a/core/pom.xml b/core/pom.xml
index c9f8656eacb..4b55f93aa19 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -105,8 +105,6 @@
-
-
       com.google.guava
       guava
@@ -165,7 +163,6 @@
     commons-cli
     commons-cli
-
     org.codehaus.groovy
diff --git a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java
index 169c017804b..1e9ecf7ae6f 100644
--- a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java
+++ b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java
@@ -27,7 +27,6 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
 
@@ -60,7 +59,7 @@
     public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
         return new ConstantScoreWeight(this) {
             @Override
-            public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException {
+            public Scorer scorer(LeafReaderContext context) throws IOException {
                 final int maxDoc = context.reader().maxDoc();
                 if (context.docBase + maxDoc <= minDoc) {
                     return null;
@@ -89,12 +88,6 @@
                 } else {
                     doc = target;
                 }
-                while (doc < maxDoc) {
-                    if (acceptDocs == null || acceptDocs.get(doc)) {
-                        break;
-                    }
-                    doc += 1;
-                }
                 if (doc >= maxDoc) {
                     doc = NO_MORE_DOCS;
                 }
diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
index 6974dc0e5ae..493423c1fd1 100644
--- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
+++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
@@ -279,7 +279,7 @@ public class MapperQueryParser extends QueryParser {
                 if (q != null) {
                     added = true;
                     applyBoost(mField, q);
-                    applySlop(q, slop);
+                    q = applySlop(q, slop);
                     disMaxQuery.add(q);
                 }
             }
@@ -293,7 +293,7 @@ public class MapperQueryParser extends QueryParser {
                 Query q = super.getFieldQuery(mField, queryText, slop);
                 if (q != null) {
                     applyBoost(mField, q);
-                    applySlop(q, slop);
+                    q = applySlop(q, slop);
                     clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
                 }
             }
@@ -718,15 +718,6 @@
         return super.getWildcardQuery(field, aggStr.toString());
     }
 
-    @Override
-    protected WildcardQuery newWildcardQuery(Term t) {
-        // Backport: https://issues.apache.org/jira/browse/LUCENE-6677
-        assert Version.LATEST == Version.LUCENE_5_2_1;
-        WildcardQuery query = new WildcardQuery(t, maxDeterminizedStates);
-        query.setRewriteMethod(multiTermRewriteMethod);
-        return query;
-    }
-
     @Override
     protected Query getRegexpQuery(String field, String termStr) throws ParseException {
         if (lowercaseExpandedTerms) {
@@ -815,14 +806,24 @@
         }
     }
 
-    private void applySlop(Query q, int slop) {
-        if (q instanceof FilteredQuery) {
-            applySlop(((FilteredQuery)q).getQuery(), slop);
-        }
+    private Query applySlop(Query q, int slop) {
         if (q instanceof PhraseQuery) {
-            ((PhraseQuery) q).setSlop(slop);
+            PhraseQuery pq = (PhraseQuery) q;
+            PhraseQuery.Builder builder = new PhraseQuery.Builder();
+            builder.setSlop(slop);
+            final Term[] terms = pq.getTerms();
+            final int[] positions = pq.getPositions();
+            for (int i = 0; i < terms.length; ++i) {
+                builder.add(terms[i], positions[i]);
+            }
+            pq = builder.build();
+            pq.setBoost(q.getBoost());
+            return pq;
         } else if (q instanceof MultiPhraseQuery) {
             ((MultiPhraseQuery) q).setSlop(slop);
+            return q;
+        } else {
+            return q;
         }
     }
 
diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java
deleted file mode 100644
index efdddf5260e..00000000000
--- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
-Licensed to Elasticsearch under one or more contributor
-license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright
-ownership. Elasticsearch licenses this file to you under
-the Apache License, Version 2.0 (the "License"); you may
-not use this file except in compliance with the License.
-You may obtain a copy of the License at
- *
- http://www.apache.org/licenses/LICENSE-2.0
- *
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
- */
-
-package org.apache.lucene.search.postingshighlight;
-
-import java.text.BreakIterator;
-import java.text.CharacterIterator;
-
-/**
- * A {@link BreakIterator} that breaks the text whenever a certain separator, provided as a constructor argument, is found.
- */ -public class CustomSeparatorBreakIterator extends BreakIterator { - - private final char separator; - private CharacterIterator text; - private int current; - - public CustomSeparatorBreakIterator(char separator) { - this.separator = separator; - } - - @Override - public int current() { - return current; - } - - @Override - public int first() { - text.setIndex(text.getBeginIndex()); - return current = text.getIndex(); - } - - @Override - public int last() { - text.setIndex(text.getEndIndex()); - return current = text.getIndex(); - } - - @Override - public int next() { - if (text.getIndex() == text.getEndIndex()) { - return DONE; - } else { - return advanceForward(); - } - } - - private int advanceForward() { - char c; - while( (c = text.next()) != CharacterIterator.DONE) { - if (c == separator) { - return current = text.getIndex() + 1; - } - } - assert text.getIndex() == text.getEndIndex(); - return current = text.getIndex(); - } - - @Override - public int following(int pos) { - if (pos < text.getBeginIndex() || pos > text.getEndIndex()) { - throw new IllegalArgumentException("offset out of bounds"); - } else if (pos == text.getEndIndex()) { - // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something) - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909 - text.setIndex(text.getEndIndex()); - current = text.getIndex(); - return DONE; - } else { - text.setIndex(pos); - current = text.getIndex(); - return advanceForward(); - } - } - - @Override - public int previous() { - if (text.getIndex() == text.getBeginIndex()) { - return DONE; - } else { - return advanceBackward(); - } - } - - private int advanceBackward() { - char c; - while( (c = text.previous()) != CharacterIterator.DONE) { - if (c == separator) { - return current = text.getIndex() + 1; - } - } - assert text.getIndex() == text.getBeginIndex(); - return current = text.getIndex(); - } - - @Override - public int preceding(int pos) { - if (pos < text.getBeginIndex() || pos > text.getEndIndex()) { - throw new IllegalArgumentException("offset out of bounds"); - } else if (pos == text.getBeginIndex()) { - // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something) - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909 - text.setIndex(text.getBeginIndex()); - current = text.getIndex(); - return DONE; - } else { - text.setIndex(pos); - current = text.getIndex(); - return advanceBackward(); - } - } - - @Override - public int next(int n) { - if (n < 0) { - for (int i = 0; i < -n; i++) { - previous(); - } - } else { - for (int i = 0; i < n; i++) { - next(); - } - } - return current(); - } - - @Override - public CharacterIterator getText() { - return text; - } - - @Override - public void setText(CharacterIterator newText) { - text = newText; - current = text.getBeginIndex(); - } -} diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java index ec26fffb228..98401cd2e14 100644 --- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java +++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.store.*; import org.apache.lucene.util.*; import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.LimitedFiniteStringsIterator; import 
org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.Transition; import org.apache.lucene.util.fst.*; @@ -465,16 +466,12 @@ public long ramBytesUsed() { byte buffer[] = new byte[8]; try { ByteArrayDataOutput output = new ByteArrayDataOutput(buffer); - BytesRef surfaceForm; - while ((surfaceForm = iterator.next()) != null) { - Set paths = toFiniteStrings(surfaceForm, ts2a); - - maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, paths.size()); - - for (IntsRef path : paths) { - - Util.toBytesRef(path, scratch); + for (BytesRef surfaceForm; (surfaceForm = iterator.next()) != null;) { + LimitedFiniteStringsIterator finiteStrings = + new LimitedFiniteStringsIterator(toAutomaton(surfaceForm, ts2a), maxGraphExpansions); + for (IntsRef string; (string = finiteStrings.next()) != null; count++) { + Util.toBytesRef(string, scratch); // length of the analyzed text (FST input) if (scratch.length() > Short.MAX_VALUE-2) { @@ -526,7 +523,7 @@ public long ramBytesUsed() { writer.write(buffer, 0, output.getPosition()); } - count++; + maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, finiteStrings.size()); } writer.close(); @@ -912,23 +909,17 @@ public long ramBytesUsed() { return prefixPaths; } - public final Set toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException { - // Analyze surface form: - TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString()); - return toFiniteStrings(ts2a, ts); - } - - public final Set toFiniteStrings(final TokenStreamToAutomaton ts2a, final TokenStream ts) throws IOException { - Automaton automaton = null; - try { - - // Create corresponding automaton: labels are bytes - // from each analyzed token, with byte 0 used as - // separator between tokens: - automaton = ts2a.toAutomaton(ts); - } finally { - IOUtils.closeWhileHandlingException(ts); + final Automaton toAutomaton(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException { + try (TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString())) { + return toAutomaton(ts, ts2a); } + } + + final Automaton toAutomaton(TokenStream ts, final TokenStreamToAutomaton ts2a) throws IOException { + // Create corresponding automaton: labels are bytes + // from each analyzed token, with byte 0 used as + // separator between tokens: + Automaton automaton = ts2a.toAutomaton(ts); automaton = replaceSep(automaton); automaton = convertAutomaton(automaton); @@ -940,11 +931,24 @@ public long ramBytesUsed() { // more than one path, eg if the analyzer created a // graph using SynFilter or WDF): - // TODO: we could walk & add simultaneously, so we - // don't have to alloc [possibly biggish] - // intermediate HashSet in RAM: + return automaton; + } - return Operations.getFiniteStrings(automaton, maxGraphExpansions); + // EDIT: Adrien, needed by lookup providers + // NOTE: these XForks are unmaintainable, we need to get rid of them... 
+ public Set toFiniteStrings(TokenStream stream) throws IOException { + final TokenStreamToAutomaton ts2a = getTokenStreamToAutomaton(); + Automaton automaton; + try (TokenStream ts = stream) { + automaton = toAutomaton(ts, ts2a); + } + LimitedFiniteStringsIterator finiteStrings = + new LimitedFiniteStringsIterator(automaton, maxGraphExpansions); + Set set = new HashSet<>(); + for (IntsRef string = finiteStrings.next(); string != null; string = finiteStrings.next()) { + set.add(IntsRef.deepCopyOf(string)); + } + return Collections.unmodifiableSet(set); } final Automaton toLookupAutomaton(final CharSequence key) throws IOException { diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java index 5170057a67c..20f95c646fc 100644 --- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java +++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java @@ -28,9 +28,10 @@ import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.PairOutputs; import java.io.IOException; -import java.util.Arrays; +import java.util.ArrayList; import java.util.List; -import java.util.Set; + +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; /** * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is @@ -221,42 +222,37 @@ public final class XFuzzySuggester extends XAnalyzingSuggester { } Automaton toLevenshteinAutomata(Automaton automaton) { - final Set ref = Operations.getFiniteStrings(automaton, -1); - Automaton subs[] = new Automaton[ref.size()]; - int upto = 0; - for (IntsRef path : ref) { - if (path.length <= nonFuzzyPrefix || path.length < minFuzzyLength) { - subs[upto] = Automata.makeString(path.ints, path.offset, path.length); - upto++; + List subs = new ArrayList<>(); + FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton); + for (IntsRef string; (string = finiteStrings.next()) != null;) { + if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) { + subs.add(Automata.makeString(string.ints, string.offset, string.length)); } else { - int ints[] = new int[path.length-nonFuzzyPrefix]; - System.arraycopy(path.ints, path.offset+nonFuzzyPrefix, ints, 0, ints.length); + int ints[] = new int[string.length-nonFuzzyPrefix]; + System.arraycopy(string.ints, string.offset+nonFuzzyPrefix, ints, 0, ints.length); // TODO: maybe add alphaMin to LevenshteinAutomata, // and pass 1 instead of 0? We probably don't want // to allow the trailing dedup bytes to be // edited... but then 0 byte is "in general" allowed // on input (but not in UTF8). LevenshteinAutomata lev = new LevenshteinAutomata(ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions); - subs[upto] = lev.toAutomaton(maxEdits, UnicodeUtil.newString(path.ints, path.offset, nonFuzzyPrefix)); - upto++; + subs.add(lev.toAutomaton(maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix))); } } - if (subs.length == 0) { + if (subs.isEmpty()) { // automaton is empty, there is no accepted paths through it return Automata.makeEmpty(); // matches nothing - } else if (subs.length == 1) { + } else if (subs.size() == 1) { // no synonyms or anything: just a single path through the tokenstream - return subs[0]; + return subs.get(0); } else { // multiple paths: this is really scary! is it slow? // maybe we should not do this and throw UOE? 
- Automaton a = Operations.union(Arrays.asList(subs)); + Automaton a = Operations.union(subs); // TODO: we could call toLevenshteinAutomata() before det? // this only happens if you have multiple paths anyway (e.g. synonyms) - - // This automaton should not blow up during determinize: - return Operations.determinize(a, Integer.MAX_VALUE); + return Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES); } } } diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index d12fcd3274b..624aa02e416 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -258,7 +258,7 @@ public class Version { public static final int V_2_0_0_ID = 2000099; public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_1_0_ID = 2010099; - public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0); public static final Version CURRENT = V_2_1_0; diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index c613f617774..7be0b032259 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -180,6 +180,7 @@ import org.elasticsearch.action.suggest.TransportSuggestAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; +import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.action.termvectors.TermVectorsAction; @@ -252,6 +253,7 @@ public class ActionModule extends AbstractModule { } bind(ActionFilters.class).asEagerSingleton(); bind(AutoCreateIndex.class).asEagerSingleton(); + bind(DestructiveOperations.class).asEagerSingleton(); registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java b/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java index a63f6dcd9fa..f4152ac85e4 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java @@ -39,7 +39,7 @@ import java.util.Collections; /** * Base class for write action responses. 
*/ -public abstract class ActionWriteResponse extends ActionResponse { +public class ActionWriteResponse extends ActionResponse { public final static ActionWriteResponse.ShardInfo.Failure[] EMPTY = new ActionWriteResponse.ShardInfo.Failure[0]; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index 755fb330f7b..ea2f6d7a581 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.health; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Maps; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; @@ -39,6 +38,7 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -270,7 +270,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable, Streama ClusterShardHealth shardHealth = readClusterShardHealth(in); shards.put(shardHealth.getId(), shardHealth); } - validationFailures = ImmutableList.copyOf(in.readStringArray()); + validationFailures = Arrays.asList(in.readStringArray()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 2d930309d02..c933156fcb0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,6 +26,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -35,13 +36,13 @@ import java.util.List; */ public class GetRepositoriesResponse extends ActionResponse implements Iterable { - private ImmutableList repositories = ImmutableList.of(); + private List repositories = Collections.emptyList(); GetRepositoriesResponse() { } - GetRepositoriesResponse(ImmutableList repositories) { + GetRepositoriesResponse(List repositories) { this.repositories = repositories; } @@ -59,7 +60,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable< public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableList.Builder repositoryListBuilder = ImmutableList.builder(); + List repositoryListBuilder = new ArrayList<>(); for (int j = 0; j < size; j++) { repositoryListBuilder.add(new RepositoryMetaData( in.readString(), @@ -67,7 +68,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable< Settings.readSettingsFromStream(in)) ); } - repositories = 
repositoryListBuilder.build(); + repositories = Collections.unmodifiableList(repositoryListBuilder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index bf7d7e4e9c1..1e2e2fd7335 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -37,6 +36,10 @@ import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + /** * Transport action for get repositories operation */ @@ -71,11 +74,11 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio if (repositories != null) { listener.onResponse(new GetRepositoriesResponse(repositories.repositories())); } else { - listener.onResponse(new GetRepositoriesResponse(ImmutableList.of())); + listener.onResponse(new GetRepositoriesResponse(Collections.emptyList())); } } else { if (repositories != null) { - ImmutableList.Builder repositoryListBuilder = ImmutableList.builder(); + List repositoryListBuilder = new ArrayList<>(); for (String repository : request.repositories()) { RepositoryMetaData repositoryMetaData = repositories.repository(repository); if (repositoryMetaData == null) { @@ -84,7 +87,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio } repositoryListBuilder.add(repositoryMetaData); } - listener.onResponse(new GetRepositoriesResponse(repositoryListBuilder.build())); + listener.onResponse(new GetRepositoriesResponse(Collections.unmodifiableList(repositoryListBuilder))); } else { listener.onFailure(new RepositoryMissingException(request.repositories()[0])); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 71b8fa34a2a..4ca88daad54 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.snapshots.SnapshotInfo; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -36,12 +37,12 @@ import java.util.List; */ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { - private ImmutableList snapshots = 
ImmutableList.of(); + private List snapshots = Collections.emptyList(); GetSnapshotsResponse() { } - GetSnapshotsResponse(ImmutableList snapshots) { + GetSnapshotsResponse(List snapshots) { this.snapshots = snapshots; } @@ -58,11 +59,11 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { builder.add(SnapshotInfo.readSnapshotInfo(in)); } - snapshots = builder.build(); + snapshots = Collections.unmodifiableList(builder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 40a00c73d4f..b21e16d2d66 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -37,6 +36,8 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -71,7 +72,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction listener) { try { - ImmutableList.Builder snapshotInfoBuilder = ImmutableList.builder(); + List snapshotInfoBuilder = new ArrayList<>(); if (isAllSnapshots(request.snapshots())) { List snapshots = snapshotsService.snapshots(request.repository()); for (Snapshot snapshot : snapshots) { @@ -88,7 +89,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction shards; + private List shards; private ImmutableMap indicesStatus; @@ -57,7 +57,7 @@ public class SnapshotStatus implements ToXContent, Streamable { private SnapshotStats stats; - SnapshotStatus(SnapshotId snapshotId, State state, ImmutableList shards) { + SnapshotStatus(SnapshotId snapshotId, State state, List shards) { this.snapshotId = snapshotId; this.state = state; this.shards = shards; @@ -127,11 +127,11 @@ public class SnapshotStatus implements ToXContent, Streamable { snapshotId = SnapshotId.readSnapshotId(in); state = State.fromValue(in.readByte()); int size = in.readVInt(); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { builder.add(SnapshotIndexShardStatus.readShardSnapshotStatus(in)); } - shards = builder.build(); + shards = Collections.unmodifiableList(builder); updateShardStats(); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 6191a45d6b3..e5692374fcb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -28,18 +27,21 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; /** * Snapshot status response */ public class SnapshotsStatusResponse extends ActionResponse implements ToXContent { - private ImmutableList snapshots = ImmutableList.of(); + private List snapshots = Collections.emptyList(); SnapshotsStatusResponse() { } - SnapshotsStatusResponse(ImmutableList snapshots) { + SnapshotsStatusResponse(List snapshots) { this.snapshots = snapshots; } @@ -48,7 +50,7 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten * * @return the list of snapshots */ - public ImmutableList getSnapshots() { + public List getSnapshots() { return snapshots; } @@ -56,11 +58,11 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { builder.add(SnapshotStatus.readSnapshotStatus(in)); } - snapshots = builder.build(); + snapshots = Collections.unmodifiableList(builder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 65ceaa2c533..12a8135cf44 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -42,6 +41,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -138,7 +139,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction currentSnapshots, TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException { // First process snapshot that are currently processed - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); Set currentSnapshotIds = newHashSet(); if (!currentSnapshots.isEmpty()) { Map nodeSnapshotStatusMap; @@ -150,7 +151,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatusBuilder = ImmutableList.builder(); + List shardStatusBuilder = new ArrayList<>(); for (ImmutableMap.Entry shardEntry : entry.shards().entrySet()) { 
SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.getValue(); if (status.nodeId() != null) { @@ -189,7 +190,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatusBuilder = ImmutableList.builder(); + List shardStatusBuilder = new ArrayList<>(); if (snapshot.state().completed()) { ImmutableMap shardStatues = snapshotsService.snapshotShards(snapshotId); for (ImmutableMap.Entry shardStatus : shardStatues.entrySet()) { @@ -222,13 +223,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction> aliasMetaData = metaData.findAliases(aliases, indexAsArray); + ImmutableOpenMap> aliasMetaData = metaData.findAliases(aliases, indexAsArray); List finalAliases = new ArrayList<>(); - for (ObjectCursor> curAliases : aliasMetaData.values()) { + for (ObjectCursor> curAliases : aliasMetaData.values()) { for (AliasMetaData aliasMeta: curAliases.value) { finalAliases.add(aliasMeta.alias()); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 106e864a367..e23faa1cbbf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.alias.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -61,7 +61,7 @@ public class GetAliasesResponse extends ActionResponse { for (int j = 0; j < valueSize; j++) { value.add(AliasMetaData.Builder.readFrom(in)); } - aliasesBuilder.put(key, ImmutableList.copyOf(value)); + aliasesBuilder.put(key, Collections.unmodifiableList(value)); } aliases = aliasesBuilder.build(); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 496b8a3e8d1..7c7dfb039bf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -64,7 +64,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type + @SuppressWarnings("unchecked") ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 6eb0c0665c1..e4793027559 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -48,10 +48,10 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction { + private FlushRequest request = new FlushRequest(); - ShardFlushRequest() { - } - - ShardFlushRequest(ShardId shardId, FlushRequest request) { - super(shardId, request); + public ShardFlushRequest(FlushRequest request) { + super(request); this.request = request; } + public ShardFlushRequest() { + } + + FlushRequest getRequest() { + return request; + } @Override public void readFrom(StreamInput in) throws IOException { @@ -53,7 +53,5 @@ class ShardFlushRequest extends BroadcastShardRequest { request.writeTo(out); } - FlushRequest getRequest() { - return request; - } + } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java deleted file mode 100644 index 6f2cc6a5522..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; -import org.elasticsearch.index.shard.ShardId; - -/** - * - */ -class ShardFlushResponse extends BroadcastShardResponse { - - ShardFlushResponse() { - - } - - ShardFlushResponse(ShardId shardId) { - super(shardId); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 323a6cc2382..2882b508a81 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -19,99 +19,45 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * Flush Action. 
*/ -public class TransportFlushAction extends TransportBroadcastAction { - - private final IndicesService indicesService; +public class TransportFlushAction extends TransportBroadcastReplicationAction { @Inject public TransportFlushAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, FlushAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - FlushRequest.class, ShardFlushRequest.class, ThreadPool.Names.FLUSH); - this.indicesService = indicesService; + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportShardFlushAction replicatedFlushAction) { + super(FlushAction.NAME, FlushRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction); } @Override - protected FlushResponse newResponse(FlushRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { - int successfulShards = 0; - int failedShards = 0; - List shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // a non active shard, ignore - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - successfulShards++; - } - } - return new FlushResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected ActionWriteResponse newShardResponse() { + return new ActionWriteResponse(); } @Override - protected ShardFlushRequest newShardRequest(int numShards, ShardRouting shard, FlushRequest request) { - return new ShardFlushRequest(shard.shardId(), request); + protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardId) { + return new ShardFlushRequest(request).setShardId(shardId); } @Override - protected ShardFlushResponse newShardResponse() { - return new ShardFlushResponse(); - } - - @Override - protected ShardFlushResponse shardOperation(ShardFlushRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); - indexShard.flush(request.getRequest()); - return new ShardFlushResponse(request.shardId()); - } - - /** - * The refresh request works against *all* shards. 
- */ - @Override - protected GroupShardsIterator shards(ClusterState clusterState, FlushRequest request, String[] concreteIndices) { - return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true, true); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, FlushRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, FlushRequest countRequest, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java new file mode 100644 index 00000000000..239a487614f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * + */ +public class TransportShardFlushAction extends TransportReplicationAction { + + public static final String NAME = FlushAction.NAME + "[s]"; + + @Inject + public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService, + IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction, + actionFilters, indexNameExpressionResolver, ShardFlushRequest.class, ShardFlushRequest.class, ThreadPool.Names.FLUSH); + } + + @Override + protected ActionWriteResponse newResponseInstance() { + return new ActionWriteResponse(); + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + indexShard.flush(shardRequest.request.getRequest()); + logger.trace("{} flush request executed on primary", indexShard.shardId()); + return new Tuple<>(new ActionWriteResponse(), shardRequest.request); + } + + @Override + protected void shardOperationOnReplica(ShardId shardId, ShardFlushRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); + indexShard.flush(request.getRequest()); + logger.trace("{} flush request executed on replica", indexShard.shardId()); + } + + @Override + protected boolean checkWriteConsistency() { + return false; + } + + @Override + protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { + return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt(); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new 
String[]{request.concreteIndex()}); + } + + @Override + protected boolean shouldExecuteReplication(Settings settings) { + return true; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 3bc0ad0e1ff..0930f8f1d4e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -32,21 +31,24 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; /** * A response for a delete index action. */ public class GetIndexResponse extends ActionResponse { - private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); + private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); private ImmutableOpenMap> mappings = ImmutableOpenMap.of(); - private ImmutableOpenMap> aliases = ImmutableOpenMap.of(); + private ImmutableOpenMap> aliases = ImmutableOpenMap.of(); private ImmutableOpenMap settings = ImmutableOpenMap.of(); private String[] indices; - GetIndexResponse(String[] indices, ImmutableOpenMap> warmers, + GetIndexResponse(String[] indices, ImmutableOpenMap> warmers, ImmutableOpenMap> mappings, - ImmutableOpenMap> aliases, ImmutableOpenMap settings) { + ImmutableOpenMap> aliases, ImmutableOpenMap settings) { this.indices = indices; if (warmers != null) { this.warmers = warmers; @@ -73,11 +75,11 @@ public class GetIndexResponse extends ActionResponse { return indices(); } - public ImmutableOpenMap> warmers() { + public ImmutableOpenMap> warmers() { return warmers; } - public ImmutableOpenMap> getWarmers() { + public ImmutableOpenMap> getWarmers() { return warmers(); } @@ -89,11 +91,11 @@ public class GetIndexResponse extends ActionResponse { return mappings(); } - public ImmutableOpenMap> aliases() { + public ImmutableOpenMap> aliases() { return aliases; } - public ImmutableOpenMap> getAliases() { + public ImmutableOpenMap> getAliases() { return aliases(); } @@ -110,11 +112,11 @@ public class GetIndexResponse extends ActionResponse { super.readFrom(in); this.indices = in.readStringArray(); int warmersSize = in.readVInt(); - ImmutableOpenMap.Builder> warmersMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> warmersMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < warmersSize; i++) { String key = in.readString(); int valueSize = in.readVInt(); - ImmutableList.Builder warmerEntryBuilder = ImmutableList.builder(); + List warmerEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { warmerEntryBuilder.add(new IndexWarmersMetaData.Entry( in.readString(), @@ -123,7 +125,7 @@ public class GetIndexResponse extends ActionResponse { in.readBytesReference()) ); } - warmersMapBuilder.put(key, warmerEntryBuilder.build()); + warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder)); } warmers = warmersMapBuilder.build(); int mappingsSize = in.readVInt(); @@ -139,15 +141,15 @@ public class GetIndexResponse extends ActionResponse { } mappings = 
mappingsMapBuilder.build(); int aliasesSize = in.readVInt(); - ImmutableOpenMap.Builder> aliasesMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> aliasesMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < aliasesSize; i++) { String key = in.readString(); int valueSize = in.readVInt(); - ImmutableList.Builder aliasEntryBuilder = ImmutableList.builder(); + List aliasEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { aliasEntryBuilder.add(AliasMetaData.Builder.readFrom(in)); } - aliasesMapBuilder.put(key, aliasEntryBuilder.build()); + aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); } aliases = aliasesMapBuilder.build(); int settingsSize = in.readVInt(); @@ -164,7 +166,7 @@ public class GetIndexResponse extends ActionResponse { super.writeTo(out); out.writeStringArray(indices); out.writeVInt(warmers.size()); - for (ObjectObjectCursor> indexEntry : warmers) { + for (ObjectObjectCursor> indexEntry : warmers) { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) { @@ -184,7 +186,7 @@ public class GetIndexResponse extends ActionResponse { } } out.writeVInt(aliases.size()); - for (ObjectObjectCursor> indexEntry : aliases) { + for (ObjectObjectCursor> indexEntry : aliases) { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (AliasMetaData aliasEntry : indexEntry.value) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 89360ce42b0..e398541fa99 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; @@ -41,6 +40,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; + /** * Get index action. 
*/ @@ -71,9 +72,9 @@ public class TransportGetIndexAction extends TransportClusterInfoAction listener) { - ImmutableOpenMap> warmersResult = ImmutableOpenMap.of(); + ImmutableOpenMap> warmersResult = ImmutableOpenMap.of(); ImmutableOpenMap> mappingsResult = ImmutableOpenMap.of(); - ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); + ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); ImmutableOpenMap settings = ImmutableOpenMap.of(); Feature[] features = request.features(); boolean doneAliases = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 1e3abb0257f..3ba8c2e80c4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -47,10 +47,11 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction { public static final RefreshAction INSTANCE = new RefreshAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java index 8f871307135..b0cb49c8874 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest; */ public class RefreshRequest extends BroadcastRequest { - RefreshRequest() { } @@ -48,5 +47,4 @@ public class RefreshRequest extends BroadcastRequest { public RefreshRequest(String... indices) { super(indices); } - } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java index 28295fdd0a0..ba3ec31c6a5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java @@ -21,34 +21,18 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.IOException; import java.util.List; /** * The response of a refresh action. 
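Because shard-level refresh is now replicated, the response totals refer to shard copies (primary plus replicas), not just the shards a broadcast happened to reach. The failure path later in this patch derives the copy count as `numberOfReplicas + 1` per shard; a toy calculation under assumed index settings:

---------------------------------------------------------------------------
class ShardCopyMath {
    public static void main(String[] args) {
        int numberOfShards = 5;   // assumed index.number_of_shards
        int numberOfReplicas = 1; // assumed index.number_of_replicas

        int copiesPerShard = numberOfReplicas + 1; // primary + replicas
        int totalNumCopies = numberOfShards * copiesPerShard;

        // A fully successful refresh on this index would then report
        // 10 successful shards out of 10 total.
        System.out.println(totalNumCopies);
    }
}
---------------------------------------------------------------------------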
- * - * */ public class RefreshResponse extends BroadcastResponse { RefreshResponse() { - } RefreshResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java deleted file mode 100644 index 37ea2cc46de..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.index.shard.ShardId; - -/** - * - */ -class ShardRefreshRequest extends BroadcastShardRequest { - - ShardRefreshRequest() { - } - - ShardRefreshRequest(ShardId shardId, RefreshRequest request) { - super(shardId, request); - } - -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java deleted file mode 100644 index 4de0f5877dd..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; -import org.elasticsearch.index.shard.ShardId; - -/** - * - */ -class ShardRefreshResponse extends BroadcastShardResponse { - - ShardRefreshResponse() { - } - - ShardRefreshResponse(ShardId shardId) { - super(shardId); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 2eead86e202..2ba385dd7d1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -19,100 +19,46 @@ package org.elasticsearch.action.admin.indices.refresh; +import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * Refresh action. 
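The rewrite that follows reduces `TransportRefreshAction` to three small factory hooks; shard resolution, fan-out, and failure accounting all move into the new `TransportBroadcastReplicationAction` base class. A skeletal sketch of that division of labour (simplified types, not the actual ES signatures):

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.List;

abstract class BroadcastByReplication<Req, ShardReq, Resp> {
    // The base class owns shard resolution and fan-out; subclasses only
    // say how to build per-shard requests and how to fold results.
    abstract ShardReq newShardRequest(Req request, int shardId);

    abstract Resp newResponse(int successful, int failed, int totalCopies,
                              List<String> failures);
}

class RefreshSketch extends BroadcastByReplication<String, String, String> {
    @Override
    String newShardRequest(String request, int shardId) {
        return request + " -> shard " + shardId;
    }

    @Override
    String newResponse(int successful, int failed, int totalCopies,
                       List<String> failures) {
        return successful + "/" + totalCopies + " shard copies refreshed";
    }

    public static void main(String[] args) {
        RefreshSketch action = new RefreshSketch();
        System.out.println(action.newShardRequest("refresh", 0));
        System.out.println(action.newResponse(10, 0, 10,
                Collections.<String>emptyList()));
    }
}
---------------------------------------------------------------------------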
*/ -public class TransportRefreshAction extends TransportBroadcastAction { - - private final IndicesService indicesService; +public class TransportRefreshAction extends TransportBroadcastReplicationAction { @Inject public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, RefreshAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - RefreshRequest.class, ShardRefreshRequest.class, ThreadPool.Names.REFRESH); - this.indicesService = indicesService; + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportShardRefreshAction shardRefreshAction) { + super(RefreshAction.NAME, RefreshRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction); } @Override - protected RefreshResponse newResponse(RefreshRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { - int successfulShards = 0; - int failedShards = 0; - List shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // non active shard, ignore - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - successfulShards++; - } - } - return new RefreshResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected ActionWriteResponse newShardResponse() { + return new ActionWriteResponse(); } @Override - protected ShardRefreshRequest newShardRequest(int numShards, ShardRouting shard, RefreshRequest request) { - return new ShardRefreshRequest(shard.shardId(), request); + protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) { + return new ReplicationRequest(request).setShardId(shardId); } @Override - protected ShardRefreshResponse newShardResponse() { - return new ShardRefreshResponse(); - } - - @Override - protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); - indexShard.refresh("api"); - logger.trace("{} refresh request executed", indexShard.shardId()); - return new ShardRefreshResponse(request.shardId()); - } - - /** - * The refresh request works against *all* shards. 
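The broadcast action previously had to enumerate every assigned copy itself (the `allAssignedShardsGrouped` override deleted just below); with replication, replicas are reached by the same path as a write: the operation runs on the primary and is then forwarded to each replica. A stub of that flow (the `Shard` interface is hypothetical, not the ES transport machinery):

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

class ReplicatedRefreshFlow {
    interface Shard {
        void refresh(String source);
    }

    // Mirrors shardOperationOnPrimary / shardOperationOnReplica:
    // primary first, then every assigned replica copy.
    static void refreshShardGroup(Shard primary, List<Shard> replicas) {
        primary.refresh("api");
        for (Shard replica : replicas) {
            replica.refresh("api");
        }
    }

    public static void main(String[] args) {
        Shard log = source -> System.out.println("refresh(" + source + ")");
        refreshShardGroup(log, Arrays.asList(log, log));
    }
}
---------------------------------------------------------------------------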
- */ - @Override - protected GroupShardsIterator shards(ClusterState clusterState, RefreshRequest request, String[] concreteIndices) { - return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true, true); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, RefreshRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, RefreshRequest countRequest, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java new file mode 100644 index 00000000000..ac3911abfbf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * + */ +public class TransportShardRefreshAction extends TransportReplicationAction { + + public static final String NAME = RefreshAction.NAME + "[s]"; + + @Inject + public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService, + IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction, + actionFilters, indexNameExpressionResolver, ReplicationRequest.class, ReplicationRequest.class, ThreadPool.Names.REFRESH); + } + + @Override + protected ActionWriteResponse newResponseInstance() { + return new ActionWriteResponse(); + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + indexShard.refresh("api"); + logger.trace("{} refresh request executed on primary", indexShard.shardId()); + return new Tuple<>(new ActionWriteResponse(), shardRequest.request); + } + + @Override + protected void shardOperationOnReplica(ShardId shardId, ReplicationRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id()); + indexShard.refresh("api"); + logger.trace("{} refresh request executed on replica", indexShard.shardId()); + } + + @Override + protected boolean checkWriteConsistency() { + return false; + } + + @Override + protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { + return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt(); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) { + return 
state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new String[]{request.concreteIndex()}); + } + + @Override + protected boolean shouldExecuteReplication(Settings settings) { + return true; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java index 4b3264fca40..3e39cfd561f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.segments; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -28,6 +27,7 @@ import org.elasticsearch.index.engine.Segment; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -91,7 +91,7 @@ public class ShardSegments implements Streamable, Iterable { shardRouting = readShardRoutingEntry(in); int size = in.readVInt(); if (size == 0) { - segments = ImmutableList.of(); + segments = Collections.emptyList(); } else { segments = new ArrayList<>(size); for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 50d305efe90..84b39d4c689 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.shards; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; @@ -38,6 +37,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.*; @@ -258,15 +258,15 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } private ImmutableOpenMap>> storeStatuses; - private ImmutableList failures; + private List failures; - public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, ImmutableList failures) { + public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, List failures) { this.storeStatuses = storeStatuses; this.failures = failures; } IndicesShardStoresResponse() { - this(ImmutableOpenMap.>>of(), ImmutableList.of()); + this(ImmutableOpenMap.>>of(), Collections.emptyList()); } /** @@ -281,7 +281,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon * Returns node {@link Failure}s encountered * while executing the request */ - public ImmutableList getFailures() { + public List getFailures() { return failures; } @@ -306,12 +306,12 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon 
storeStatusesBuilder.put(index, shardEntries.build()); } int numFailure = in.readVInt(); - ImmutableList.Builder failureBuilder = ImmutableList.builder(); + List failureBuilder = new ArrayList<>(); for (int i = 0; i < numFailure; i++) { failureBuilder.add(Failure.readFailure(in)); } storeStatuses = storeStatusesBuilder.build(); - failures = failureBuilder.build(); + failures = Collections.unmodifiableList(failureBuilder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index b783ce112ac..01613d69086 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.admin.indices.shards; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -34,7 +33,11 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; @@ -48,7 +51,11 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Queue; +import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; /** @@ -157,7 +164,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc void finish() { ImmutableOpenMap.Builder>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder(); - ImmutableList.Builder failureBuilder = ImmutableList.builder(); + java.util.List failureBuilder = new ArrayList<>(); for (Response fetchResponse : fetchResponses) { ImmutableOpenIntMap> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndex()); final ImmutableOpenIntMap.Builder> indexShardsBuilder; @@ -183,7 +190,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), failure.getCause())); } } - listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), failureBuilder.build())); + listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); } private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 3d1ef78d2bf..2d3c0a0a90e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.action.admin.indices.validate.query.QueryExplanation.readQueryExplanation; @@ -51,7 +51,7 @@ public class ValidateQueryResponse extends BroadcastResponse { this.valid = valid; this.queryExplanations = queryExplanations; if (queryExplanations == null) { - this.queryExplanations = ImmutableList.of(); + this.queryExplanations = Collections.emptyList(); } } @@ -67,7 +67,7 @@ public class ValidateQueryResponse extends BroadcastResponse { */ public List getQueryExplanation() { if (queryExplanations == null) { - return ImmutableList.of(); + return Collections.emptyList(); } return queryExplanations; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java index d469e29ca96..ab3090a5a81 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java @@ -55,7 +55,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction< @Override protected void doRun() throws Exception { - ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH); + ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request); BytesReference processedTemplate = (BytesReference) executable.run(); RenderSearchTemplateResponse response = new RenderSearchTemplateResponse(); response.source(processedTemplate); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java index cb45d36d39b..3ed444c88dd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.warmer.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.bytes.BytesReference; @@ -30,6 +29,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; /** * Holds a warmer-name to a list of {@link IndexWarmersMetaData} mapping for each warmer specified @@ -38,20 +40,20 @@ import java.io.IOException; */ public class GetWarmersResponse extends ActionResponse { - private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); + private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); - GetWarmersResponse(ImmutableOpenMap> warmers) { + GetWarmersResponse(ImmutableOpenMap> warmers) { this.warmers = warmers; } GetWarmersResponse() { } - public ImmutableOpenMap> warmers() { + public ImmutableOpenMap> warmers() { return warmers; } - public ImmutableOpenMap> getWarmers() { + public ImmutableOpenMap> getWarmers() { return warmers(); } @@ -59,11 +61,11 @@ public class GetWarmersResponse extends ActionResponse { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableOpenMap.Builder> indexMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> indexMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < size; i++) { String key = in.readString(); int valueSize = in.readVInt(); - ImmutableList.Builder warmerEntryBuilder = ImmutableList.builder(); + List warmerEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { String name = in.readString(); String[] types = in.readStringArray(); @@ -77,7 +79,7 @@ public class GetWarmersResponse extends ActionResponse { source) ); } - indexMapBuilder.put(key, warmerEntryBuilder.build()); + indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder)); } warmers = indexMapBuilder.build(); } @@ -86,7 +88,7 @@ public class GetWarmersResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(warmers.size()); - for (ObjectObjectCursor> indexEntry : warmers) { + for (ObjectObjectCursor> indexEntry : warmers) { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java index 0504e329a1e..50d972b3e61 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.warmer.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; @@ -35,6 +34,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; + /** * Internal Actions executed on the master fetching the warmer from the cluster state metadata. 
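The `GetWarmersResponse` diff above serializes its map as: a vInt map size, then per entry a key string, a vInt list length, and the list elements. A JDK-only round-trip of the same layout (fixed-width ints stand in for ES's variable-length vInts):

---------------------------------------------------------------------------
import java.io.*;
import java.util.*;

class LengthPrefixedMap {
    static void write(DataOutput out, Map<String, List<String>> map) throws IOException {
        out.writeInt(map.size());
        for (Map.Entry<String, List<String>> e : map.entrySet()) {
            out.writeUTF(e.getKey());
            out.writeInt(e.getValue().size()); // length prefix
            for (String value : e.getValue()) {
                out.writeUTF(value);
            }
        }
    }

    static Map<String, List<String>> read(DataInput in) throws IOException {
        int size = in.readInt();
        Map<String, List<String>> map = new HashMap<>();
        for (int i = 0; i < size; i++) {
            String key = in.readUTF();
            int count = in.readInt();
            List<String> values = new ArrayList<>(count);
            for (int j = 0; j < count; j++) {
                values.add(in.readUTF());
            }
            map.put(key, Collections.unmodifiableList(values));
        }
        return map;
    }

    public static void main(String[] args) throws IOException {
        Map<String, List<String>> warmers = new HashMap<>();
        warmers.put("my_index", Arrays.asList("warmer_1", "warmer_2"));

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), warmers);
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(read(in)); // {my_index=[warmer_1, warmer_2]}
    }
}
---------------------------------------------------------------------------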
* @@ -66,7 +67,7 @@ public class TransportGetWarmersAction extends TransportClusterInfoAction listener) { - ImmutableOpenMap> result = state.metaData().findWarmers( + ImmutableOpenMap> result = state.metaData().findWarmers( concreteIndices, request.types(), request.warmers() ); listener.onResponse(new GetWarmersResponse(result)); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index a1eb616b1ee..6bda7b259ee 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.ArrayList; @@ -32,8 +33,6 @@ import java.util.List; */ public class BulkShardRequest extends ReplicationRequest { - private int shardId; - private BulkItemRequest[] items; private boolean refresh; @@ -44,7 +43,7 @@ public class BulkShardRequest extends ReplicationRequest { BulkShardRequest(BulkRequest bulkRequest, String index, int shardId, boolean refresh, BulkItemRequest[] items) { super(bulkRequest); this.index = index; - this.shardId = shardId; + this.setShardId(new ShardId(index, shardId)); this.items = items; this.refresh = refresh; } @@ -53,10 +52,6 @@ public class BulkShardRequest extends ReplicationRequest { return this.refresh; } - int shardId() { - return shardId; - } - BulkItemRequest[] items() { return items; } @@ -75,7 +70,6 @@ public class BulkShardRequest extends ReplicationRequest { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVInt(shardId); out.writeVInt(items.length); for (BulkItemRequest item : items) { if (item != null) { @@ -91,7 +85,6 @@ public class BulkShardRequest extends ReplicationRequest { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = in.readVInt(); items = new BulkItemRequest[in.readVInt()]; for (int i = 0; i < items.length; i++) { if (in.readBoolean()) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 2ca2dfe142a..a9aa3dcb31d 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -109,7 +109,7 @@ public class TransportShardBulkAction extends TransportReplicationAction listener) { request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index())); diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 97620829f3a..83e70c2f504 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -120,11 +120,6 @@ public class TransportIndexAction extends TransportReplicationAction indexResponseActionListener) { MetaData metaData = clusterService.state().metaData(); diff --git 
a/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java index c626cda581e..5416e2f66d7 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.percolate; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.common.bytes.BytesReference; @@ -35,6 +34,7 @@ import org.elasticsearch.search.query.QuerySearchResult; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -45,7 +45,7 @@ public class PercolateShardResponse extends BroadcastShardResponse { private static final BytesRef[] EMPTY_MATCHES = new BytesRef[0]; private static final float[] EMPTY_SCORES = new float[0]; - private static final List> EMPTY_HL = ImmutableList.of(); + private static final List> EMPTY_HL = Collections.emptyList(); private long count; private float[] scores; diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index 2194975161a..a9015d24129 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -146,7 +146,7 @@ public class TransportPercolateAction extends TransportBroadcastAction implem } public MultiSearchRequest add(byte[] data, int from, int length, - @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception { - return add(new BytesArray(data, from, length), indices, types, searchType, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true); + boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception { + return add(new BytesArray(data, from, length), isTemplateRequest, indices, types, searchType, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true); } - public MultiSearchRequest add(BytesReference data, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception { - return add(data, indices, types, searchType, null, indicesOptions, true); + public MultiSearchRequest add(BytesReference data, boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception { + return add(data, isTemplateRequest, indices, types, searchType, null, indicesOptions, true); } - public MultiSearchRequest add(BytesReference data, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception { + public MultiSearchRequest add(BytesReference data, boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception { XContent xContent = XContentFactory.xContent(data); int from = 0; int length = data.length(); @@ 
-146,8 +146,11 @@ public class MultiSearchRequest extends ActionRequest implem if (nextMarker == -1) { break; } - - searchRequest.source(data.slice(from, nextMarker - from)); + if (isTemplateRequest) { + searchRequest.templateSource(data.slice(from, nextMarker - from)); + } else { + searchRequest.source(data.slice(from, nextMarker - from)); + } // move pointers from = nextMarker + 1; @@ -157,15 +160,6 @@ public class MultiSearchRequest extends ActionRequest implem return this; } - private String[] parseArray(XContentParser parser) throws IOException { - final List list = new ArrayList<>(); - assert parser.currentToken() == XContentParser.Token.START_ARRAY; - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - list.add(parser.text()); - } - return list.toArray(new String[list.size()]); - } - private int findNextMarker(byte marker, int from, BytesReference data, int length) { for (int i = from; i < length; i++) { if (data.get(i) == marker) { diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java index b97377a4d87..1147ece73e3 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java @@ -75,7 +75,8 @@ public class TransportSearchCountAction extends TransportSearchTypeAction { @Override protected void moveToSecondPhase() throws Exception { // no need to sort, since we know we have no hits back - final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray) AtomicArray.empty()); + final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, + (AtomicArray) AtomicArray.empty(), request); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java index 8868379b3bf..7244a1ff58a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java @@ -134,7 +134,8 @@ public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAc @Override public void doRun() throws IOException { sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, + queryFetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java index de3032eb887..20bb205fef5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search.type; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -39,8 +40,8 @@ import org.elasticsearch.search.action.SearchServiceTransportAction; import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; @@ -210,7 +211,8 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, + fetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java index c37fdce7633..3c4f5419f00 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java @@ -81,7 +81,8 @@ public class TransportSearchQueryAndFetchAction extends TransportSearchTypeActio public void doRun() throws IOException { boolean useScroll = request.scroll() != null; sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, firstResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, + firstResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java index edd5cf63d22..c23e5b70c15 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search.type; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -145,7 +146,8 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi 
threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, fetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, + fetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java index c5ea86763f4..cf2b4ee8df0 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search.type; import com.google.common.collect.ImmutableMap; + import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -73,7 +74,8 @@ public class TransportSearchScanAction extends TransportSearchTypeAction { @Override protected void moveToSecondPhase() throws Exception { - final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray) AtomicArray.empty()); + final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, + (AtomicArray) AtomicArray.empty(), request); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, ImmutableMap.of("total_hits", Long.toString(internalResponse.hits().totalHits()))); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java index bb2c82d8831..cd4238ccdea 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java @@ -21,7 +21,11 @@ package org.elasticsearch.action.search.type; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.*; +import org.elasticsearch.action.search.ReduceSearchPhaseException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -188,7 +192,8 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent private void innerFinishHim() throws Exception { ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, + 
queryFetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java index 9c7742615c4..85b06ea7860 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java @@ -20,9 +20,14 @@ package org.elasticsearch.action.search.type; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.*; +import org.elasticsearch.action.search.ReduceSearchPhaseException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -239,7 +244,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent } private void innerFinishHim() { - InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java index 16ab26f7d46..2bc516b2bbe 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java @@ -212,7 +212,8 @@ public class TransportSearchScrollScanAction extends AbstractComponent { docs.add(scoreDoc); } } - final InternalSearchResponse internalResponse = searchPhaseController.merge(docs.toArray(new ScoreDoc[0]), queryFetchResults, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(docs.toArray(new ScoreDoc[0]), queryFetchResults, + queryFetchResults, request); ((InternalSearchHits) internalResponse.hits()).totalHits = Long.parseLong(this.scrollId.getAttributes().get("total_hits")); diff --git a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java index c584c8856a5..43141f3e501 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction extends ActionRequest implements IndicesRequest.Replaceable { +public class BroadcastRequest extends ActionRequest implements IndicesRequest.Replaceable { protected String[] indices; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - protected BroadcastRequest() { + public 
BroadcastRequest() { } diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index 560c7ec9869..54d6220fd34 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -32,17 +32,17 @@ import static org.elasticsearch.action.support.DefaultShardOperationFailedExcept /** * Base class for all broadcast operation based responses. */ -public abstract class BroadcastResponse extends ActionResponse { +public class BroadcastResponse extends ActionResponse { private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0]; private int totalShards; private int successfulShards; private int failedShards; private ShardOperationFailedException[] shardFailures = EMPTY; - protected BroadcastResponse() { + public BroadcastResponse() { } - protected BroadcastResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + public BroadcastResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { this.totalShards = totalShards; this.successfulShards = successfulShards; this.failedShards = failedShards; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 93907cf0aa6..37244c71efe 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -37,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * */ -public abstract class ReplicationRequest extends ActionRequest implements IndicesRequest { +public class ReplicationRequest extends ActionRequest implements IndicesRequest { public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); @@ -49,14 +50,14 @@ public abstract class ReplicationRequest extends A private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; private volatile boolean canHaveDuplicates = false; - protected ReplicationRequest() { + public ReplicationRequest() { } /** * Creates a new request that inherits headers and context from the request provided as argument. */ - protected ReplicationRequest(ActionRequest request) { + public ReplicationRequest(ActionRequest request) { super(request); } @@ -133,6 +134,16 @@ public abstract class ReplicationRequest extends A return this.consistencyLevel; } + /** + * @return the shardId of the shard where this operation should be executed on. + * can be null in case the shardId is determined by a single document (index, type, id) for example for index or delete request. + */ + public + @Nullable + ShardId shardId() { + return internalShardId; + } + /** * Sets the consistency level of write. 
Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} */ @@ -173,4 +184,10 @@ public abstract class ReplicationRequest extends A out.writeString(index); out.writeBoolean(canHaveDuplicates); } + + public T setShardId(ShardId shardId) { + this.internalShardId = shardId; + this.index = shardId.getIndex(); + return (T) this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java new file mode 100644 index 00000000000..42a83630a53 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import com.carrotsearch.hppc.cursors.IntObjectCursor; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * Base class for requests that should be executed on all shards of an index or several indices. 
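The `doExecute` implementation that follows sends one shard-level request per shard id and must complete the caller's listener exactly once, when the final per-shard callback (success or failure) arrives; ES's `CountDown` returns `true` only for the decrement that reaches zero. A condensed model, with an `AtomicInteger` standing in for `CountDown` and synchronous callbacks standing in for transport replies:

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

class BroadcastFanOut {
    static void execute(List<Integer> shardIds, Consumer<List<String>> listener) {
        List<String> responses = new CopyOnWriteArrayList<>();
        if (shardIds.isEmpty()) {
            listener.accept(responses);
            return;
        }
        AtomicInteger pending = new AtomicInteger(shardIds.size());
        for (int shardId : shardIds) {
            // In the real action this body is an ActionListener invoked
            // asynchronously; failures are recorded as synthetic responses
            // so the countdown still completes.
            responses.add("shard " + shardId + ": ok");
            if (pending.decrementAndGet() == 0) {
                listener.accept(responses); // exactly one completion
            }
        }
    }

    public static void main(String[] args) {
        execute(Arrays.asList(0, 1, 2), System.out::println);
    }
}
---------------------------------------------------------------------------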
+ * This action sends shard requests to all primary shards of the indices and they are then replicated like write requests + */ +public abstract class TransportBroadcastReplicationAction extends HandledTransportAction { + + private final TransportReplicationAction replicatedBroadcastShardAction; + private final ClusterService clusterService; + + public TransportBroadcastReplicationAction(String name, Class request, Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) { + super(settings, name, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + this.replicatedBroadcastShardAction = replicatedBroadcastShardAction; + this.clusterService = clusterService; + } + + @Override + protected void doExecute(final Request request, final ActionListener listener) { + final ClusterState clusterState = clusterService.state(); + List shards = shards(request, clusterState); + final CopyOnWriteArrayList shardsResponses = new CopyOnWriteArrayList(); + if (shards.size() == 0) { + finishAndNotifyListener(listener, shardsResponses); + } + final CountDown responsesCountDown = new CountDown(shards.size()); + for (final ShardId shardId : shards) { + ActionListener shardActionListener = new ActionListener() { + @Override + public void onResponse(ShardResponse shardResponse) { + shardsResponses.add(shardResponse); + logger.trace("{}: got response from {}", actionName, shardId); + if (responsesCountDown.countDown()) { + finishAndNotifyListener(listener, shardsResponses); + } + } + + @Override + public void onFailure(Throwable e) { + logger.trace("{}: got failure from {}", actionName, shardId); + int totalNumCopies = clusterState.getMetaData().index(shardId.index().getName()).getNumberOfReplicas() + 1; + ShardResponse shardResponse = newShardResponse(); + ActionWriteResponse.ShardInfo.Failure[] failures; + if (ExceptionsHelper.unwrap(e, UnavailableShardsException.class) != null) { + failures = new ActionWriteResponse.ShardInfo.Failure[0]; + } else { + ActionWriteResponse.ShardInfo.Failure failure = new ActionWriteResponse.ShardInfo.Failure(shardId.index().name(), shardId.id(), null, e, ExceptionsHelper.status(e), true); + failures = new ActionWriteResponse.ShardInfo.Failure[totalNumCopies]; + Arrays.fill(failures, failure); + } + shardResponse.setShardInfo(new ActionWriteResponse.ShardInfo(totalNumCopies, 0, failures)); + shardsResponses.add(shardResponse); + if (responsesCountDown.countDown()) { + finishAndNotifyListener(listener, shardsResponses); + } + } + }; + shardExecute(request, shardId, shardActionListener); + } + } + + protected void shardExecute(Request request, ShardId shardId, ActionListener shardActionListener) { + replicatedBroadcastShardAction.execute(newShardRequest(request, shardId), shardActionListener); + } + + /** + * @return all shard ids the request should run on + */ + protected List shards(Request request, ClusterState clusterState) { + List shardIds = new ArrayList<>(); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + for (String index : concreteIndices) { + IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index); + if (indexMetaData != null) { + for (IntObjectCursor shardRouting : clusterState.getRoutingTable().indicesRouting().get(index).getShards()) { + 
shardIds.add(shardRouting.value.shardId()); + } + } + } + return shardIds; + } + + protected abstract ShardResponse newShardResponse(); + + protected abstract ShardRequest newShardRequest(Request request, ShardId shardId); + + private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayList shardsResponses) { + logger.trace("{}: got all shard responses", actionName); + int successfulShards = 0; + int failedShards = 0; + int totalNumCopies = 0; + List shardFailures = null; + for (int i = 0; i < shardsResponses.size(); i++) { + ActionWriteResponse shardResponse = shardsResponses.get(i); + if (shardResponse == null) { + // non active shard, ignore + } else { + failedShards += shardResponse.getShardInfo().getFailed(); + successfulShards += shardResponse.getShardInfo().getSuccessful(); + totalNumCopies += shardResponse.getShardInfo().getTotal(); + if (shardFailures == null) { + shardFailures = new ArrayList<>(); + } + for (ActionWriteResponse.ShardInfo.Failure failure : shardResponse.getShardInfo().getFailures()) { + shardFailures.add(new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(new ShardId(failure.index(), failure.shardId()), failure.getCause()))); + } + } + } + listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures)); + } + + protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures); +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 155c30756ca..608575007f4 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -142,7 +142,9 @@ public abstract class TransportReplicationActionof()); + return new PlainShardIterator(shardIterator.shardId(), Collections.emptyList()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index fba2f23852e..11c79676106 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -246,7 +246,7 @@ public class UpdateHelper extends AbstractComponent { private Map executeScript(UpdateRequest request, Map ctx) { try { if (scriptService != null) { - ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE); + ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... 
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index 08f8ba4cfe6..4f204966875 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -22,6 +22,8 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.common.SuppressForbidden; import java.net.URI; +import java.net.URL; +import java.security.CodeSource; import java.security.Permission; import java.security.PermissionCollection; import java.security.Policy; @@ -44,11 +46,22 @@ final class ESPolicy extends Policy { } @Override @SuppressForbidden(reason = "fast equals check is desired") - public boolean implies(ProtectionDomain domain, Permission permission) { - // run groovy scripts with no permissions - if ("/groovy/script".equals(domain.getCodeSource().getLocation().getFile())) { - return false; + public boolean implies(ProtectionDomain domain, Permission permission) { + CodeSource codeSource = domain.getCodeSource(); + // codesource can be null when reducing privileges via doPrivileged() + if (codeSource != null) { + URL location = codeSource.getLocation(); + // location can be null... ??? nobody knows + // https://bugs.openjdk.java.net/browse/JDK-8129972 + if (location != null) { + // run groovy scripts with no permissions + if ("/groovy/script".equals(location.getFile())) { + return false; + } + } } + + // otherwise defer to template + dynamic file permissions return template.implies(domain, permission) || dynamic.implies(permission); } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index 4dcbd6c752e..2a129f07261 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -19,7 +19,6 @@ package org.elasticsearch.bootstrap; -import com.google.common.collect.ImmutableList; import com.sun.jna.*; import com.sun.jna.win32.StdCallLibrary; @@ -29,6 +28,7 @@ import org.elasticsearch.common.logging.Loggers; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; @@ -85,8 +85,8 @@ final class JNAKernel32Library { return result; } - ImmutableList getCallbacks() { - return ImmutableList.builder().addAll(callbacks).build(); + List getCallbacks() { + return Collections.unmodifiableList(callbacks); } /** diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 8e93ded7b61..c60d6895d0f 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.client.transport; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -50,6 +49,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -83,12 +83,12 @@ public class TransportClientNodesService 
extends AbstractComponent { private final Headers headers; // nodes that are added to be discovered - private volatile ImmutableList listedNodes = ImmutableList.of(); + private volatile List listedNodes = Collections.emptyList(); private final Object mutex = new Object(); - private volatile List nodes = ImmutableList.of(); - private volatile List filteredNodes = ImmutableList.of(); + private volatile List nodes = Collections.emptyList(); + private volatile List filteredNodes = Collections.emptyList(); private final AtomicInteger tempNodeIdGenerator = new AtomicInteger(); @@ -129,11 +129,11 @@ public class TransportClientNodesService extends AbstractComponent { } public List transportAddresses() { - ImmutableList.Builder lstBuilder = ImmutableList.builder(); + List lstBuilder = new ArrayList<>(); for (DiscoveryNode listedNode : listedNodes) { lstBuilder.add(listedNode.address()); } - return lstBuilder.build(); + return Collections.unmodifiableList(lstBuilder); } public List connectedNodes() { @@ -170,14 +170,14 @@ public class TransportClientNodesService extends AbstractComponent { if (filtered.isEmpty()) { return this; } - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); builder.addAll(listedNodes()); for (TransportAddress transportAddress : filtered) { DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(), transportAddress, minCompatibilityVersion); logger.debug("adding address [{}]", node); builder.add(node); } - listedNodes = builder.build(); + listedNodes = Collections.unmodifiableList(builder); nodesSampler.sample(); } return this; @@ -188,7 +188,7 @@ public class TransportClientNodesService extends AbstractComponent { if (closed) { throw new IllegalStateException("transport client is closed, can't remove an address"); } - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (DiscoveryNode otherNode : listedNodes) { if (!otherNode.address().equals(transportAddress)) { builder.add(otherNode); @@ -196,7 +196,7 @@ public class TransportClientNodesService extends AbstractComponent { logger.debug("removing address [{}]", otherNode); } } - listedNodes = builder.build(); + listedNodes = Collections.unmodifiableList(builder); nodesSampler.sample(); } return this; @@ -271,7 +271,7 @@ public class TransportClientNodesService extends AbstractComponent { for (DiscoveryNode listedNode : listedNodes) { transportService.disconnectFromNode(listedNode); } - nodes = ImmutableList.of(); + nodes = Collections.emptyList(); } } @@ -321,7 +321,7 @@ public class TransportClientNodesService extends AbstractComponent { } } - return new ImmutableList.Builder().addAll(nodes).build(); + return Collections.unmodifiableList(new ArrayList<>(nodes)); } } @@ -386,7 +386,7 @@ public class TransportClientNodesService extends AbstractComponent { } nodes = validateNewNodes(newNodes); - filteredNodes = ImmutableList.copyOf(newFilteredNodes); + filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); } } @@ -486,7 +486,7 @@ public class TransportClientNodesService extends AbstractComponent { } nodes = validateNewNodes(newNodes); - filteredNodes = ImmutableList.copyOf(newFilteredNodes); + filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index 209781d78f5..f55452b5805 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -20,13 +20,13 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; /** @@ -86,7 +86,7 @@ public class ClusterChangedEvent { return Arrays.asList(state.metaData().indices().keys().toArray(String.class)); } if (!metaDataChanged()) { - return ImmutableList.of(); + return Collections.emptyList(); } List created = null; for (ObjectCursor cursor : state.metaData().indices().keys()) { @@ -98,7 +98,7 @@ public class ClusterChangedEvent { created.add(index); } } - return created == null ? ImmutableList.of() : created; + return created == null ? Collections.emptyList() : created; } /** @@ -116,10 +116,10 @@ public class ClusterChangedEvent { // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and // https://github.com/elastic/elasticsearch/issues/11665 if (hasNewMaster() || previousState == null) { - return ImmutableList.of(); + return Collections.emptyList(); } if (!metaDataChanged()) { - return ImmutableList.of(); + return Collections.emptyList(); } List deleted = null; for (ObjectCursor cursor : previousState.metaData().indices().keys()) { @@ -131,7 +131,7 @@ public class ClusterChangedEvent { deleted.add(index); } } - return deleted == null ? ImmutableList.of() : deleted; + return deleted == null ? Collections.emptyList() : deleted; } public boolean metaDataChanged() { diff --git a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index 857f1a34d3f..eabe615d587 100644 --- a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.metadata.SnapshotId; @@ -30,6 +29,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -42,14 +44,14 @@ public class RestoreInProgress extends AbstractDiffable implements Custo public static final RestoreInProgress PROTO = new RestoreInProgress(); - private final ImmutableList entries; + private final List entries; /** * Constructs new restore metadata * * @param entries list of currently running restore processes */ - public RestoreInProgress(ImmutableList entries) { + public RestoreInProgress(List entries) { this.entries = entries; } @@ -59,7 +61,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo * @param entries list of currently running restore processes */ public RestoreInProgress(Entry... 
entries) { - this.entries = ImmutableList.copyOf(entries); + this.entries = Arrays.asList(entries); } /** @@ -111,7 +113,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo private final State state; private final SnapshotId snapshotId; private final ImmutableMap shards; - private final ImmutableList indices; + private final List indices; /** * Creates new restore metadata @@ -121,7 +123,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo * @param indices list of indices being restored * @param shards list of shards being restored and thier current restore status */ - public Entry(SnapshotId snapshotId, State state, ImmutableList indices, ImmutableMap shards) { + public Entry(SnapshotId snapshotId, State state, List indices, ImmutableMap shards) { this.snapshotId = snapshotId; this.state = state; this.indices = indices; @@ -164,7 +166,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo * * @return list of indices */ - public ImmutableList indices() { + public List indices() { return indices; } @@ -413,7 +415,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo SnapshotId snapshotId = SnapshotId.readSnapshotId(in); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); + List indexBuilder = new ArrayList<>(); for (int j = 0; j < indices; j++) { indexBuilder.add(in.readString()); } @@ -424,7 +426,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); builder.put(shardId, shardState); } - entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + entries[i] = new Entry(snapshotId, state, Collections.unmodifiableList(indexBuilder), builder.build()); } return new RestoreInProgress(entries); } diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index a315e682c3b..a6babbbd9fc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.metadata.SnapshotId; @@ -31,7 +30,10 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -67,11 +69,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private final SnapshotId snapshotId; private final boolean includeGlobalState; private final ImmutableMap shards; - private final ImmutableList indices; - private final ImmutableMap> waitingIndices; + private final List indices; + private final ImmutableMap> waitingIndices; private final long startTime; - public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, ImmutableList indices, long startTime, ImmutableMap shards) { + public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List indices, long startTime, ImmutableMap shards) { this.state = state; this.snapshotId 
= snapshotId; this.includeGlobalState = includeGlobalState; @@ -106,11 +108,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return state; } - public ImmutableList indices() { + public List indices() { return indices; } - public ImmutableMap> waitingIndices() { + public ImmutableMap> waitingIndices() { return waitingIndices; } @@ -152,22 +154,22 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return result; } - private ImmutableMap> findWaitingIndices(ImmutableMap shards) { - Map> waitingIndicesMap = newHashMap(); + private ImmutableMap> findWaitingIndices(ImmutableMap shards) { + Map> waitingIndicesMap = newHashMap(); for (ImmutableMap.Entry entry : shards.entrySet()) { if (entry.getValue().state() == State.WAITING) { - ImmutableList.Builder waitingShards = waitingIndicesMap.get(entry.getKey().getIndex()); + List waitingShards = waitingIndicesMap.get(entry.getKey().getIndex()); if (waitingShards == null) { - waitingShards = ImmutableList.builder(); + waitingShards = new ArrayList<>(); waitingIndicesMap.put(entry.getKey().getIndex(), waitingShards); } waitingShards.add(entry.getKey()); } } if (!waitingIndicesMap.isEmpty()) { - ImmutableMap.Builder> waitingIndicesBuilder = ImmutableMap.builder(); - for (Map.Entry> entry : waitingIndicesMap.entrySet()) { - waitingIndicesBuilder.put(entry.getKey(), entry.getValue().build()); + ImmutableMap.Builder> waitingIndicesBuilder = ImmutableMap.builder(); + for (Map.Entry> entry : waitingIndicesMap.entrySet()) { + waitingIndicesBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); } return waitingIndicesBuilder.build(); } else { @@ -324,15 +326,15 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } } - private final ImmutableList entries; + private final List entries; - public SnapshotsInProgress(ImmutableList entries) { + public SnapshotsInProgress(List entries) { this.entries = entries; } public SnapshotsInProgress(Entry... 
entries) { - this.entries = ImmutableList.copyOf(entries); + this.entries = Arrays.asList(entries); } public List entries() { @@ -361,7 +363,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus boolean includeGlobalState = in.readBoolean(); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); + List indexBuilder = new ArrayList<>(); for (int j = 0; j < indices; j++) { indexBuilder.add(in.readString()); } @@ -374,7 +376,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus State shardState = State.fromValue(in.readByte()); builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } - entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); } return new SnapshotsInProgress(entries); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index eb5b716017a..723d0e9a1ad 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -57,13 +56,13 @@ import static com.google.common.collect.Maps.newHashMap; public class IndexNameExpressionResolver extends AbstractComponent { - private final ImmutableList expressionResolvers; + private final List expressionResolvers; private final DateMathExpressionResolver dateMathExpressionResolver; @Inject public IndexNameExpressionResolver(Settings settings) { super(settings); - expressionResolvers = ImmutableList.of( + expressionResolvers = Arrays.asList( dateMathExpressionResolver = new DateMathExpressionResolver(settings), new WildcardExpressionResolver() ); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 001bf424a2b..ef4b451de80 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.Collections2; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.UnmodifiableIterator; import org.apache.lucene.util.CollectionUtil; @@ -253,7 +252,7 @@ public class MetaData implements Iterable, Diffable, Fr * @param concreteIndices The concrete indexes the index aliases must point to order to be returned. 
* @return the found index aliases grouped by index */ - public ImmutableOpenMap> findAliases(final String[] aliases, String[] concreteIndices) { + public ImmutableOpenMap> findAliases(final String[] aliases, String[] concreteIndices) { assert aliases != null; assert concreteIndices != null; if (concreteIndices.length == 0) { @@ -261,7 +260,7 @@ public class MetaData implements Iterable, Diffable, Fr } boolean matchAllAliases = matchAllAliases(aliases); - ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); @@ -281,7 +280,7 @@ public class MetaData implements Iterable, Diffable, Fr return o1.alias().compareTo(o2.alias()); } }); - mapBuilder.put(index, ImmutableList.copyOf(filteredValues)); + mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } } return mapBuilder.build(); @@ -364,7 +363,7 @@ public class MetaData implements Iterable, Diffable, Fr return indexMapBuilder.build(); } - public ImmutableOpenMap> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) { + public ImmutableOpenMap> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) { assert uncheckedWarmers != null; assert concreteIndices != null; if (concreteIndices.length == 0) { @@ -373,7 +372,7 @@ public class MetaData implements Iterable, Diffable, Fr // special _all check to behave the same like not specifying anything for the warmers (not for the indices) final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers; - ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); @@ -382,6 +381,7 @@ public class MetaData implements Iterable, Diffable, Fr continue; } + // TODO: make this a List so we don't have to copy below Collection filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate() { @Override @@ -399,7 +399,7 @@ public class MetaData implements Iterable, Diffable, Fr }); if (!filteredWarmers.isEmpty()) { - mapBuilder.put(index, ImmutableList.copyOf(filteredWarmers)); + mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers))); } } return mapBuilder.build(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 48e40d1a54f..23a4c32017d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.MetaData.Custom; @@ -33,9 +32,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import 
java.util.List; -import java.util.Map; /** * Contains metadata about registered snapshot repositories @@ -46,7 +45,7 @@ public class RepositoriesMetaData extends AbstractDiffable implements Me public static final RepositoriesMetaData PROTO = new RepositoriesMetaData(); - private final ImmutableList repositories; + private final List repositories; /** * Constructs new repository metadata @@ -54,7 +53,7 @@ public class RepositoriesMetaData extends AbstractDiffable implements Me * @param repositories list of repositories */ public RepositoriesMetaData(RepositoryMetaData... repositories) { - this.repositories = ImmutableList.copyOf(repositories); + this.repositories = Arrays.asList(repositories); } /** @@ -62,7 +61,7 @@ public class RepositoriesMetaData extends AbstractDiffable implements Me * * @return list of repositories */ - public ImmutableList repositories() { + public List repositories() { return this.repositories; } diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 4d8229e514c..ca1ccbd9549 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.node; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.Version; @@ -35,6 +34,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.net.InetAddress; +import java.util.Collections; +import java.util.List; import java.util.Map; import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream; @@ -92,7 +93,7 @@ public class DiscoveryNode implements Streamable, ToXContent { return Booleans.isExplicitTrue(data); } - public static final ImmutableList EMPTY_LIST = ImmutableList.of(); + public static final List EMPTY_LIST = Collections.emptyList(); private String nodeName = ""; private String nodeId; diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index eebd770707f..847173ec1a7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.node; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; @@ -36,6 +35,7 @@ import org.elasticsearch.common.transport.TransportAddress; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -433,7 +433,7 @@ public class DiscoveryNodes extends AbstractDiffable implements newMasterNode = masterNode(); } } - return new Delta(previousMasterNode, newMasterNode, localNodeId, ImmutableList.copyOf(removed), ImmutableList.copyOf(added)); + return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added)); } @Override @@ -472,14 +472,14 @@ public class DiscoveryNodes extends AbstractDiffable implements private final 
String localNodeId; private final DiscoveryNode previousMasterNode; private final DiscoveryNode newMasterNode; - private final ImmutableList removed; - private final ImmutableList added; + private final List removed; + private final List added; - public Delta(String localNodeId, ImmutableList removed, ImmutableList added) { + public Delta(String localNodeId, List removed, List added) { this(null, null, localNodeId, removed, added); } - public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, ImmutableList removed, ImmutableList added) { + public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, List removed, List added) { this.previousMasterNode = previousMasterNode; this.newMasterNode = newMasterNode; this.localNodeId = localNodeId; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index e8311b5e294..2987cdf1855 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.IntCursor; import com.carrotsearch.hppc.cursors.IntObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; import org.apache.lucene.util.CollectionUtil; @@ -36,6 +35,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Set; @@ -73,7 +73,7 @@ public class IndexRoutingTable extends AbstractDiffable imple this.index = index; this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt()); this.shards = shards; - ImmutableList.Builder allActiveShards = ImmutableList.builder(); + List allActiveShards = new ArrayList<>(); for (IntObjectCursor cursor : shards) { for (ShardRouting shardRouting : cursor.value) { shardRouting.freeze(); @@ -82,7 +82,7 @@ public class IndexRoutingTable extends AbstractDiffable imple } } } - this.allActiveShards = allActiveShards.build(); + this.allActiveShards = Collections.unmodifiableList(allActiveShards); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 5cc35aeacef..1f9a3461297 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -32,6 +31,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -52,30 +52,30 @@ public class IndexShardRoutingTable implements Iterable { final ShardId shardId; final ShardRouting primary; - final ImmutableList primaryAsList; - final 
ImmutableList replicas; - final ImmutableList shards; - final ImmutableList activeShards; - final ImmutableList assignedShards; - final static ImmutableList NO_SHARDS = ImmutableList.of(); + final List primaryAsList; + final List replicas; + final List shards; + final List activeShards; + final List assignedShards; + final static List NO_SHARDS = Collections.emptyList(); final boolean allShardsStarted; /** * The initializing list, including ones that are initializing on a target node because of relocation. * If we can come up with a better variable name, it would be nice... */ - final ImmutableList allInitializingShards; + final List allInitializingShards; IndexShardRoutingTable(ShardId shardId, List shards) { this.shardId = shardId; this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt()); - this.shards = ImmutableList.copyOf(shards); + this.shards = Collections.unmodifiableList(shards); ShardRouting primary = null; - ImmutableList.Builder replicas = ImmutableList.builder(); - ImmutableList.Builder activeShards = ImmutableList.builder(); - ImmutableList.Builder assignedShards = ImmutableList.builder(); - ImmutableList.Builder allInitializingShards = ImmutableList.builder(); + List replicas = new ArrayList<>(); + List activeShards = new ArrayList<>(); + List assignedShards = new ArrayList<>(); + List allInitializingShards = new ArrayList<>(); boolean allShardsStarted = true; for (ShardRouting shard : shards) { if (shard.primary()) { @@ -104,14 +104,14 @@ public class IndexShardRoutingTable implements Iterable { this.primary = primary; if (primary != null) { - this.primaryAsList = ImmutableList.of(primary); + this.primaryAsList = Collections.singletonList(primary); } else { - this.primaryAsList = ImmutableList.of(); + this.primaryAsList = Collections.emptyList(); } - this.replicas = replicas.build(); - this.activeShards = activeShards.build(); - this.assignedShards = assignedShards.build(); - this.allInitializingShards = allInitializingShards.build(); + this.replicas = Collections.unmodifiableList(replicas); + this.activeShards = Collections.unmodifiableList(activeShards); + this.assignedShards = Collections.unmodifiableList(assignedShards); + this.allInitializingShards = Collections.unmodifiableList(allInitializingShards); } /** @@ -145,7 +145,7 @@ public class IndexShardRoutingTable implements Iterable { shardRoutings.add(new ShardRouting(shards.get(i), highestVersion)); } } - return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shardRoutings)); + return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shardRoutings)); } /** @@ -468,11 +468,11 @@ public class IndexShardRoutingTable implements Iterable { static class AttributesRoutings { - public final ImmutableList withSameAttribute; - public final ImmutableList withoutSameAttribute; + public final List withSameAttribute; + public final List withoutSameAttribute; public final int totalSize; - AttributesRoutings(ImmutableList withSameAttribute, ImmutableList withoutSameAttribute) { + AttributesRoutings(List withSameAttribute, List withoutSameAttribute) { this.withSameAttribute = withSameAttribute; this.withoutSameAttribute = withoutSameAttribute; this.totalSize = withoutSameAttribute.size() + withSameAttribute.size(); @@ -488,9 +488,9 @@ public class IndexShardRoutingTable implements Iterable { if (shardRoutings == null) { synchronized (shardsByAttributeMutex) { ArrayList from = new ArrayList<>(activeShards); - ImmutableList to = collectAttributeShards(key, nodes, from); + List to = 
collectAttributeShards(key, nodes, from); - shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from)); + shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from)); activeShardsByAttributes = MapBuilder.newMapBuilder(activeShardsByAttributes).put(key, shardRoutings).immutableMap(); } } @@ -502,15 +502,15 @@ public class IndexShardRoutingTable implements Iterable { if (shardRoutings == null) { synchronized (shardsByAttributeMutex) { ArrayList from = new ArrayList<>(allInitializingShards); - ImmutableList to = collectAttributeShards(key, nodes, from); - shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from)); + List to = collectAttributeShards(key, nodes, from); + shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from)); initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap(); } } return shardRoutings; } - private static ImmutableList collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList from) { + private static List collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList from) { final ArrayList to = new ArrayList<>(); for (final String attribute : key.attributes) { final String localAttributeValue = nodes.localNode().attributes().get(attribute); @@ -527,7 +527,7 @@ public class IndexShardRoutingTable implements Iterable { } } } - return ImmutableList.copyOf(to); + return Collections.unmodifiableList(to); } public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes) { @@ -616,7 +616,7 @@ public class IndexShardRoutingTable implements Iterable { } public IndexShardRoutingTable build() { - return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shards)); + return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(new ArrayList<>(shards))); } public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java index acd207f8ad5..f7a8c31e579 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,6 +26,7 @@ import org.elasticsearch.common.io.stream.Streamable; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -53,7 +53,7 @@ public class RoutingTableValidation implements Streamable { public List allFailures() { if (failures().isEmpty() && indicesFailures().isEmpty()) { - return ImmutableList.of(); + return Collections.emptyList(); } List allFailures = new ArrayList<>(failures()); for (Map.Entry> entry : indicesFailures().entrySet()) { @@ -66,7 +66,7 @@ public class RoutingTableValidation implements Streamable { public List failures() { if (failures == null) { - return ImmutableList.of(); + return Collections.emptyList(); } return failures; } @@ -80,11 +80,11 @@ public class RoutingTableValidation implements Streamable { public List indexFailures(String index) { if (indicesFailures == 
null) { - return ImmutableList.of(); + return Collections.emptyList(); } List indexFailures = indicesFailures.get(index); if (indexFailures == null) { - return ImmutableList.of(); + return Collections.emptyList(); } return indexFailures; } @@ -120,7 +120,7 @@ public class RoutingTableValidation implements Streamable { valid = in.readBoolean(); int size = in.readVInt(); if (size == 0) { - failures = ImmutableList.of(); + failures = Collections.emptyList(); } else { failures = new ArrayList<>(size); for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 191ec9556c5..a907ef58b9f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -40,6 +39,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; +import java.util.Collections; import java.util.List; @@ -89,7 +89,7 @@ public class AllocationService extends AbstractComponent { } public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) { - return applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); + return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 8b8652a067a..b7ee93e5e23 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -335,15 +335,16 @@ public class DiskThresholdDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - final Decision decision = earlyTerminate(allocation); + ClusterInfo clusterInfo = allocation.clusterInfo(); + Map usages = clusterInfo.getNodeMostAvailableDiskUsages(); + final Decision decision = earlyTerminate(allocation, usages); if (decision != null) { return decision; } final double usedDiskThresholdLow = 100.0 - DiskThresholdDecider.this.freeDiskThresholdLow; final double usedDiskThresholdHigh = 100.0 - DiskThresholdDecider.this.freeDiskThresholdHigh; - ClusterInfo clusterInfo = allocation.clusterInfo(); - Map usages = clusterInfo.getNodeMostAvailableDiskUsages(); + DiskUsage usage = getDiskUsage(node, allocation, usages); // First, check that the node currently over the low watermark double freeDiskPercentage = usage.getFreeDiskAsPercentage(); @@ -449,12 +450,13 @@ public class DiskThresholdDecider extends AllocationDecider { if (shardRouting.currentNodeId().equals(node.nodeId()) == false) { throw new IllegalArgumentException("Shard [" + 
shardRouting + "] is not allocated on node: [" + node.nodeId() + "]"); } - final Decision decision = earlyTerminate(allocation); + final ClusterInfo clusterInfo = allocation.clusterInfo(); + final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); + final Decision decision = earlyTerminate(allocation, usages); if (decision != null) { return decision; } - final ClusterInfo clusterInfo = allocation.clusterInfo(); - final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); + final DiskUsage usage = getDiskUsage(node, allocation, usages); final String dataPath = clusterInfo.getDataPath(shardRouting); // If this node is already above the high threshold, the shard cannot remain (get it off!) @@ -590,7 +592,7 @@ public class DiskThresholdDecider extends AllocationDecider { } } - private Decision earlyTerminate(RoutingAllocation allocation) { + private Decision earlyTerminate(RoutingAllocation allocation, final Map usages) { // Always allow allocation if the decider is disabled if (!enabled) { return allocation.decision(Decision.YES, NAME, "disk threshold decider disabled"); @@ -613,7 +615,6 @@ public class DiskThresholdDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "cluster info unavailable"); } - final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); // Fail open if there are no disk usages available if (usages.isEmpty()) { if (logger.isTraceEnabled()) { diff --git a/core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java b/core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java new file mode 100644 index 00000000000..38764db9eae --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common; + +import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; + +import org.elasticsearch.common.collect.ImmutableOpenMap; + +import java.util.Set; + +public class DelegatingHasContextAndHeaders implements HasContextAndHeaders { + + private HasContextAndHeaders delegate; + + public DelegatingHasContextAndHeaders(HasContextAndHeaders delegate) { + this.delegate = delegate; + } + + @Override + public void putHeader(String key, V value) { + delegate.putHeader(key, value); + } + + @Override + public void copyContextAndHeadersFrom(HasContextAndHeaders other) { + delegate.copyContextAndHeadersFrom(other); + } + + @Override + public V getHeader(String key) { + return delegate.getHeader(key); + } + + @Override + public boolean hasHeader(String key) { + return delegate.hasHeader(key); + } + + @Override + public V putInContext(Object key, Object value) { + return delegate.putInContext(key, value); + } + + @Override + public Set getHeaders() { + return delegate.getHeaders(); + } + + @Override + public void copyHeadersFrom(HasHeaders from) { + delegate.copyHeadersFrom(from); + } + + @Override + public void putAllInContext(ObjectObjectAssociativeContainer map) { + delegate.putAllInContext(map); + } + + @Override + public V getFromContext(Object key) { + return delegate.getFromContext(key); + } + + @Override + public V getFromContext(Object key, V defaultValue) { + return delegate.getFromContext(key, defaultValue); + } + + @Override + public boolean hasInContext(Object key) { + return delegate.hasInContext(key); + } + + @Override + public int contextSize() { + return delegate.contextSize(); + } + + @Override + public boolean isContextEmpty() { + return delegate.isContextEmpty(); + } + + @Override + public ImmutableOpenMap getContext() { + return delegate.getContext(); + } + + @Override + public void copyContextFrom(HasContext other) { + delegate.copyContextFrom(other); + } + + +} diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index 5c1ff00601c..7636097e288 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -19,26 +19,28 @@ package org.elasticsearch.common.blobstore; -import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; +import java.util.List; /** * */ public class BlobPath implements Iterable { - private final ImmutableList paths; + private final List paths; public BlobPath() { - this.paths = ImmutableList.of(); + this.paths = Collections.emptyList(); } public static BlobPath cleanPath() { return new BlobPath(); } - private BlobPath(ImmutableList paths) { + private BlobPath(List paths) { this.paths = paths; } @@ -52,8 +54,10 @@ public class BlobPath implements Iterable { } public BlobPath add(String path) { - ImmutableList.Builder builder = ImmutableList.builder(); - return new BlobPath(builder.addAll(paths).add(path).build()); + List paths = new ArrayList<>(); + paths.addAll(this.paths); + paths.add(path); + return new BlobPath(Collections.unmodifiableList(paths)); } public String buildAsString(String separator) { diff --git a/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java b/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java index 2a3c419cc36..83b8e446d15 100644 --- 
a/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java @@ -16,13 +16,13 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.spi.InjectionListener; import org.elasticsearch.common.inject.spi.Message; import org.elasticsearch.common.inject.spi.TypeEncounter; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkState; @@ -47,16 +47,16 @@ final class EncounterImpl implements TypeEncounter { valid = false; } - public ImmutableList> getMembersInjectors() { + public List> getMembersInjectors() { return membersInjectors == null - ? ImmutableList.>of() - : ImmutableList.copyOf(membersInjectors); + ? Collections.>emptyList() + : Collections.unmodifiableList(membersInjectors); } - public ImmutableList> getInjectionListeners() { + public List> getInjectionListeners() { return injectionListeners == null - ? ImmutableList.>of() - : ImmutableList.copyOf(injectionListeners); + ? Collections.>emptyList() + : Collections.unmodifiableList(injectionListeners); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java b/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java index b645f41c3b8..da87efa1b8e 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java +++ b/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.InternalContext; @@ -85,7 +84,7 @@ class InjectionRequestProcessor extends AbstractProcessor { final InjectorImpl injector; final Object source; final StaticInjectionRequest request; - ImmutableList memberInjectors; + List memberInjectors; public StaticInjection(InjectorImpl injector, StaticInjectionRequest request) { this.injector = injector; diff --git a/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index a8048513055..157ff3f21e3 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import org.elasticsearch.common.Classes; @@ -49,6 +48,7 @@ import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -164,7 +164,7 @@ class InjectorImpl implements Injector, Lookups { @Override public Injector createChildInjector(Module... modules) { - return createChildInjector(ImmutableList.copyOf(modules)); + return createChildInjector(Arrays.asList(modules)); } /** @@ -716,7 +716,7 @@ class InjectorImpl implements Injector, Lookups { List> bindings = multimap.get(type); return bindings != null ? 
Collections.>unmodifiableList((List) multimap.get(type)) - : ImmutableList.>of(); + : Collections.>emptyList(); } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java index 0bac3d8acde..399a231461d 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; @@ -24,6 +23,8 @@ import org.elasticsearch.common.inject.internal.InternalContext; import org.elasticsearch.common.inject.spi.InjectionListener; import org.elasticsearch.common.inject.spi.InjectionPoint; +import java.util.List; + /** * Injects members of instances of a given type. * @@ -32,12 +33,12 @@ import org.elasticsearch.common.inject.spi.InjectionPoint; class MembersInjectorImpl implements MembersInjector { private final TypeLiteral typeLiteral; private final InjectorImpl injector; - private final ImmutableList memberInjectors; - private final ImmutableList> userMembersInjectors; - private final ImmutableList> injectionListeners; + private final List memberInjectors; + private final List> userMembersInjectors; + private final List> injectionListeners; MembersInjectorImpl(InjectorImpl injector, TypeLiteral typeLiteral, - EncounterImpl encounter, ImmutableList memberInjectors) { + EncounterImpl encounter, List memberInjectors) { this.injector = injector; this.typeLiteral = typeLiteral; this.memberInjectors = memberInjectors; @@ -45,7 +46,7 @@ class MembersInjectorImpl implements MembersInjector { this.injectionListeners = encounter.getInjectionListeners(); } - public ImmutableList getMemberInjectors() { + public List getMemberInjectors() { return memberInjectors; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java index cd7c168860a..d8ee0d77960 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java +++ b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; import org.elasticsearch.common.inject.internal.FailableCache; @@ -25,6 +24,7 @@ import org.elasticsearch.common.inject.spi.TypeListenerBinding; import java.lang.reflect.Field; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Set; @@ -35,7 +35,7 @@ import java.util.Set; */ class MembersInjectorStore { private final InjectorImpl injector; - private final ImmutableList typeListenerBindings; + private final List typeListenerBindings; private final FailableCache, MembersInjectorImpl> cache = new FailableCache, MembersInjectorImpl>() { @@ -49,7 +49,7 @@ class MembersInjectorStore { MembersInjectorStore(InjectorImpl injector, List typeListenerBindings) { this.injector = injector; - this.typeListenerBindings = ImmutableList.copyOf(typeListenerBindings); + this.typeListenerBindings = Collections.unmodifiableList(typeListenerBindings); } /** @@ 
-82,7 +82,7 @@ class MembersInjectorStore { errors.merge(e.getErrorMessages()); injectionPoints = e.getPartialValue(); } - ImmutableList injectors = getInjectors(injectionPoints, errors); + List injectors = getInjectors(injectionPoints, errors); errors.throwIfNewErrors(numErrorsBefore); EncounterImpl encounter = new EncounterImpl<>(errors, injector.lookups); @@ -104,7 +104,7 @@ class MembersInjectorStore { /** * Returns the injectors for the specified injection points. */ - ImmutableList getInjectors( + List getInjectors( Set injectionPoints, Errors errors) { List injectors = new ArrayList<>(); for (InjectionPoint injectionPoint : injectionPoints) { @@ -120,6 +120,6 @@ class MembersInjectorStore { // ignored for now } } - return ImmutableList.copyOf(injectors); + return Collections.unmodifiableList(injectors); } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java b/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java index 4c0c3652128..b124dfc5ad6 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java +++ b/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java @@ -16,12 +16,12 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.spi.Message; import java.util.Collection; +import java.util.Collections; import static com.google.common.base.Preconditions.checkArgument; @@ -47,7 +47,7 @@ public final class ProvisionException extends RuntimeException { public ProvisionException(String message, Throwable cause) { super(cause); - this.messages = ImmutableSet.of(new Message(ImmutableList.of(), message, cause)); + this.messages = ImmutableSet.of(new Message(Collections.emptyList(), message, cause)); } public ProvisionException(String message) { diff --git a/core/src/main/java/org/elasticsearch/common/inject/State.java b/core/src/main/java/org/elasticsearch/common/inject/State.java index b3f662c5fd8..53d1bddd313 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/State.java +++ b/core/src/main/java/org/elasticsearch/common/inject/State.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; @@ -24,6 +23,7 @@ import org.elasticsearch.common.inject.internal.MatcherAndConverter; import org.elasticsearch.common.inject.spi.TypeListenerBinding; import java.lang.annotation.Annotation; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -89,7 +89,7 @@ interface State { @Override public List getTypeListenerBindings() { - return ImmutableList.of(); + return Collections.emptyList(); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index 37dfebbe98b..b83df0914d6 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -16,11 +16,12 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.internal.MoreTypes; import org.elasticsearch.common.inject.util.Types; import 
java.lang.reflect.*; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkArgument; @@ -174,7 +175,7 @@ public class TypeLiteral { for (int t = 0; t < types.length; t++) { result[t] = resolve(types[t]); } - return ImmutableList.copyOf(result); + return Arrays.asList(result); } /** diff --git a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java index 03b194816ea..3eef8f2b29c 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java +++ b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.assistedinject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import org.elasticsearch.common.inject.AbstractModule; @@ -41,6 +40,7 @@ import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkState; @@ -91,7 +91,7 @@ final class FactoryProvider2 implements InvocationHandler, Provider { */ private final Key producedType; private final ImmutableMap> returnTypesByMethod; - private final ImmutableMap>> paramTypes; + private final ImmutableMap>> paramTypes; /** * the hosting injector, or null if we haven't been initialized yet @@ -118,7 +118,7 @@ final class FactoryProvider2 implements InvocationHandler, Provider { try { ImmutableMap.Builder> returnTypesBuilder = ImmutableMap.builder(); - ImmutableMap.Builder>> paramTypesBuilder + ImmutableMap.Builder>> paramTypesBuilder = ImmutableMap.builder(); // TODO: also grab methods from superinterfaces for (Method method : factoryRawType.getMethods()) { @@ -133,7 +133,7 @@ final class FactoryProvider2 implements InvocationHandler, Provider { Key paramKey = getKey(param, method, paramAnnotations[p++], errors); keys.add(assistKey(method, paramKey, errors)); } - paramTypesBuilder.put(method, ImmutableList.copyOf(keys)); + paramTypesBuilder.put(method, Collections.unmodifiableList(keys)); } returnTypesByMethod = returnTypesBuilder.build(); paramTypes = paramTypesBuilder.build(); @@ -175,8 +175,8 @@ final class FactoryProvider2 implements InvocationHandler, Provider { @Inject void initialize(Injector injector) { if (this.injector != null) { - throw new ConfigurationException(ImmutableList.of(new Message(FactoryProvider2.class, - "Factories.create() factories may only be used in one Injector!"))); + throw new ConfigurationException(Collections.singletonList(new Message(FactoryProvider2.class, + "Factories.create() factories may only be used in one Injector!"))); } this.injector = injector; diff --git a/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index d3a73b3c326..a38b2c0b346 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.internal; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.lucene.util.CollectionUtil; import 
org.elasticsearch.common.inject.ConfigurationException; @@ -42,7 +41,9 @@ import java.lang.reflect.Field; import java.lang.reflect.Member; import java.lang.reflect.Type; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Comparator; import java.util.Formatter; import java.util.List; @@ -451,7 +452,7 @@ public final class Errors implements Serializable { public List getMessages() { if (root.errors == null) { - return ImmutableList.of(); + return Collections.emptyList(); } List result = new ArrayList<>(root.errors); @@ -568,7 +569,7 @@ public final class Errors implements Serializable { abstract String toString(T t); } - private static final Collection> converters = ImmutableList.of( + private static final Collection> converters = Arrays.asList( new Converter(Class.class) { @Override public String toString(Class c) { diff --git a/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java b/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java index 59b5aaa0e7d..0caa796021c 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.internal; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.elasticsearch.common.inject.Binder; @@ -28,6 +27,7 @@ import org.elasticsearch.common.inject.spi.ElementVisitor; import org.elasticsearch.common.inject.spi.PrivateElements; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -53,7 +53,7 @@ public final class PrivateElementsImpl implements PrivateElements { /** * lazily instantiated */ - private ImmutableList elements; + private List elements; /** * lazily instantiated @@ -73,7 +73,7 @@ public final class PrivateElementsImpl implements PrivateElements { @Override public List getElements() { if (elements == null) { - elements = ImmutableList.copyOf(elementsMutable); + elements = Collections.unmodifiableList(elementsMutable); elementsMutable = null; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java b/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java index 0fb64a4e6d1..a5646315e49 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java +++ b/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.multibindings; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Binding; @@ -328,6 +327,6 @@ public abstract class Multibinder { NullPointerException npe = new NullPointerException(name); throw new ConfigurationException(ImmutableSet.of( - new Message(ImmutableList.of(), npe.toString(), npe))); + new Message(Collections.emptyList(), npe.toString(), npe))); } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 965223604b5..b4d548d5754 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ 
b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.spi; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Binder; @@ -260,7 +259,7 @@ public final class Elements { @Override public void addError(Throwable t) { String message = "An exception was caught and reported. Message: " + t.getMessage(); - elements.add(new Message(ImmutableList.of(getSource()), message, t)); + elements.add(new Message(Collections.singletonList(getSource()), message, t)); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index aa7e0793838..72fde2c11d1 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.spi; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.ConfigurationException; import org.elasticsearch.common.inject.Inject; @@ -38,6 +37,7 @@ import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -56,10 +56,10 @@ public final class InjectionPoint { private final boolean optional; private final Member member; - private final ImmutableList> dependencies; + private final List> dependencies; private InjectionPoint(Member member, - ImmutableList> dependencies, boolean optional) { + List> dependencies, boolean optional) { this.member = member; this.dependencies = dependencies; this.optional = optional; @@ -97,11 +97,11 @@ public final class InjectionPoint { } errors.throwConfigurationExceptionIfErrorsExist(); - this.dependencies = ImmutableList.>of( - newDependency(key, Nullability.allowsNull(annotations), -1)); + this.dependencies = Collections.>singletonList( + newDependency(key, Nullability.allowsNull(annotations), -1)); } - private ImmutableList> forMember(Member member, TypeLiteral type, + private List> forMember(Member member, TypeLiteral type, Annotation[][] parameterAnnotations) { Errors errors = new Errors(member); Iterator annotationsIterator = Arrays.asList(parameterAnnotations).iterator(); @@ -121,7 +121,7 @@ public final class InjectionPoint { } errors.throwConfigurationExceptionIfErrorsExist(); - return ImmutableList.copyOf(dependencies); + return Collections.unmodifiableList(dependencies); } // This metohd is necessary to create a Dependency with proper generic type information diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java b/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java index fb778e136f7..0723c0e1377 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java +++ b/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java @@ -17,13 +17,14 @@ package org.elasticsearch.common.inject.spi; import com.google.common.base.Objects; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.SourceProvider; import 
java.io.ObjectStreamException; import java.io.Serializable; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkNotNull; @@ -50,17 +51,17 @@ public final class Message implements Serializable, Element { * @since 2.0 */ public Message(List sources, String message, Throwable cause) { - this.sources = ImmutableList.copyOf(sources); + this.sources = Collections.unmodifiableList(sources); this.message = checkNotNull(message, "message"); this.cause = cause; } public Message(Object source, String message) { - this(ImmutableList.of(source), message, null); + this(Collections.singletonList(source), message, null); } public Message(String message) { - this(ImmutableList.of(), message, null); + this(Collections.emptyList(), message, null); } @Override @@ -138,7 +139,7 @@ public final class Message implements Serializable, Element { for (int i = 0; i < sourcesAsStrings.length; i++) { sourcesAsStrings[i] = Errors.convert(sourcesAsStrings[i]).toString(); } - return new Message(ImmutableList.copyOf(sourcesAsStrings), message, cause); + return new Message(Arrays.asList(sourcesAsStrings), message, cause); } private static final long serialVersionUID = 0; diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 9e449d692a8..a8089198f29 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -30,7 +30,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.joda.time.ReadableInstant; @@ -44,8 +43,6 @@ import java.util.Date; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * @@ -457,14 +454,6 @@ public abstract class StreamOutput extends OutputStream { } } - static { - assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1: "Remove these regex once we upgrade to Lucene 5.3 and get proper getters for these expections"; - } - private final static Pattern CORRUPT_INDEX_EXCEPTION_REGEX = Regex.compile("^(.+) \\(resource=(.+)\\)$", ""); - private final static Pattern INDEX_FORMAT_TOO_NEW_EXCEPTION_REGEX = Regex.compile("Format version is not supported \\(resource (.+)\\): (-?\\d+) \\(needs to be between (-?\\d+) and (-?\\d+)\\)", ""); - private final static Pattern INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_1 = Regex.compile("Format version is not supported \\(resource (.+)\\): (-?\\d+)(?: \\(needs to be between (-?\\d+) and (-?\\d+)\\)). This version of Lucene only supports indexes created with release 4.0 and later\\.", ""); - private final static Pattern INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_2 = Regex.compile("Format version is not supported \\(resource (.+)\\): (.+). 
This version of Lucene only supports indexes created with release 4.0 and later\\.", ""); - private static int parseIntSafe(String val, int defaultVal) { try { return Integer.parseInt(val); @@ -482,73 +471,29 @@ public abstract class StreamOutput extends OutputStream { boolean writeMessage = true; if (throwable instanceof CorruptIndexException) { writeVInt(1); - // Lucene 5.3 will have getters for all these - // we should switch to using getters instead of trying to parse the message: - // writeOptionalString(((CorruptIndexException)throwable).getDescription()); - // writeOptionalString(((CorruptIndexException)throwable).getResource()); - Matcher matcher = CORRUPT_INDEX_EXCEPTION_REGEX.matcher(throwable.getMessage()); - if (matcher.find()) { - writeOptionalString(matcher.group(1)); // message - writeOptionalString(matcher.group(2)); // resource - } else { - // didn't match - writeOptionalString("???"); // message - writeOptionalString("???"); // resource - } + writeOptionalString(((CorruptIndexException)throwable).getOriginalMessage()); + writeOptionalString(((CorruptIndexException)throwable).getResourceDescription()); writeMessage = false; } else if (throwable instanceof IndexFormatTooNewException) { writeVInt(2); - // Lucene 5.3 will have getters for all these - // we should switch to using getters instead of trying to parse the message: - // writeOptionalString(((CorruptIndexException)throwable).getResource()); - // writeInt(((IndexFormatTooNewException)throwable).getVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); - Matcher matcher = INDEX_FORMAT_TOO_NEW_EXCEPTION_REGEX.matcher(throwable.getMessage()); - if (matcher.find()) { - writeOptionalString(matcher.group(1)); // resource - writeInt(parseIntSafe(matcher.group(2), -1)); // version - writeInt(parseIntSafe(matcher.group(3), -1)); // min version - writeInt(parseIntSafe(matcher.group(4), -1)); // max version - } else { - // didn't match - writeOptionalString("???"); // resource - writeInt(-1); // version - writeInt(-1); // min version - writeInt(-1); // max version - } + writeOptionalString(((IndexFormatTooNewException)throwable).getResourceDescription()); + writeInt(((IndexFormatTooNewException)throwable).getVersion()); + writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); + writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); writeMessage = false; writeCause = false; } else if (throwable instanceof IndexFormatTooOldException) { writeVInt(3); - // Lucene 5.3 will have getters for all these - // we should switch to using getters instead of trying to parse the message: - // writeOptionalString(((CorruptIndexException)throwable).getResource()); - // writeInt(((IndexFormatTooNewException)throwable).getVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); - Matcher matcher = INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_1.matcher(throwable.getMessage()); - if (matcher.find()) { - // version with numeric version in constructor - writeOptionalString(matcher.group(1)); // resource - writeBoolean(true); - writeInt(parseIntSafe(matcher.group(2), -1)); // version - writeInt(parseIntSafe(matcher.group(3), -1)); // min version - writeInt(parseIntSafe(matcher.group(4), -1)); // max version + IndexFormatTooOldException t = (IndexFormatTooOldException) throwable; + writeOptionalString(t.getResourceDescription()); + if 
(t.getVersion() == null) { + writeBoolean(false); + writeOptionalString(t.getReason()); } else { - matcher = INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_2.matcher(throwable.getMessage()); - if (matcher.matches()) { - writeOptionalString(matcher.group(1)); // resource - writeBoolean(false); - writeOptionalString(matcher.group(2)); // version - } else { - // didn't match - writeOptionalString("???"); // resource - writeBoolean(true); - writeInt(-1); // version - writeInt(-1); // min version - writeInt(-1); // max version - } + writeBoolean(true); + writeInt(t.getVersion()); + writeInt(t.getMinVersion()); + writeInt(t.getMaxVersion()); } writeMessage = false; writeCause = false; diff --git a/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java index 0b4cdbdbd44..2e86beff192 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.logging.log4j; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.log4j.PropertyConfigurator; import org.elasticsearch.ElasticsearchException; @@ -35,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -47,7 +47,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class LogConfigurator { - static final List ALLOWED_SUFFIXES = ImmutableList.of(".yml", ".yaml", ".json", ".properties"); + static final List ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties"); private static boolean loaded; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 600c23899cb..f82ec128ed7 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -62,7 +62,7 @@ public class Lucene { public static final Version QUERYPARSER_VERSION = VERSION; public static final String LATEST_DOC_VALUES_FORMAT = "Lucene50"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; - public static final String LATEST_CODEC = "Lucene50"; + public static final String LATEST_CODEC = "Lucene53"; static { Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); @@ -138,36 +138,6 @@ public class Lucene { return SegmentInfos.readCommit(directory, segmentsFileName); } - /** - * Tries to acquire the {@link IndexWriter#WRITE_LOCK_NAME} on the given directory. The returned lock must be closed once - * the lock is released. If the lock can't be obtained a {@link LockObtainFailedException} is thrown. - * This method uses the {@link IndexWriterConfig#getDefaultWriteLockTimeout()} as the lock timeout. - */ - public static Lock acquireWriteLock(Directory directory) throws IOException { - return acquireLock(directory, IndexWriter.WRITE_LOCK_NAME, IndexWriterConfig.getDefaultWriteLockTimeout()); - } - - /** - * Tries to acquire a lock on the given directory. The returned lock must be closed once - * the lock is released. If the lock can't be obtained a {@link LockObtainFailedException} is thrown. 
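
The removed `acquireWriteLock`/`acquireLock` helpers are superseded by `Directory#obtainLock`, new in Lucene 5.3: it either returns the lock immediately or throws `LockObtainFailedException`, with no timeout polling, and `Lock.ensureValid()` takes over from the old `Lock.isLocked()` assertion (see the `NodeEnvironment` change later in this patch). A minimal sketch of the new idiom, assuming a local `FSDirectory` (the path and class name are illustrative, not part of the patch):

----------------------------------------------------------------------------
import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public class ObtainLockSketch {
    public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) {
            // obtainLock either succeeds immediately or throws; there is no
            // timeout/polling as in the removed acquireLock helper
            try (Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                writeLock.ensureValid(); // throws if the lock is no longer held
            } catch (LockObtainFailedException e) {
                // another IndexWriter or process holds write.lock
            }
        }
    }
}
----------------------------------------------------------------------------
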
- */ - @SuppressForbidden(reason = "this method uses trappy Directory#makeLock API") - public static Lock acquireLock(Directory directory, String lockName, long timeout) throws IOException { - final Lock writeLock = directory.makeLock(lockName); - boolean success = false; - try { - if (writeLock.obtain(timeout) == false) { - throw new LockObtainFailedException("failed to obtain lock: " + writeLock); - } - success = true; - } finally { - if (success == false) { - writeLock.close(); - } - } - return writeLock; - } - /** * This method removes all files from the given directory that are not referenced by the given segments file. * This method will open an IndexWriter and relies on index file deleter to remove all unreferenced files. Segment files @@ -179,7 +149,7 @@ public class Lucene { */ public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Directory directory) throws IOException { final SegmentInfos si = readSegmentInfos(segmentsFileName, directory); - try (Lock writeLock = acquireWriteLock(directory)) { + try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { int foundSegmentFiles = 0; for (final String file : directory.listAll()) { /** @@ -218,7 +188,7 @@ public class Lucene { * this operation fails. */ public static void cleanLuceneIndex(Directory directory) throws IOException { - try (Lock writeLock = acquireWriteLock(directory)) { + try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (final String file : directory.listAll()) { if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { directory.deleteFile(file); // remove all segment_N files diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index a28d635a04e..9853659ca06 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -19,27 +19,32 @@ package org.elasticsearch.common.lucene.all; +import org.apache.lucene.analysis.payloads.PayloadHelper; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermState; import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.payloads.AveragePayloadFunction; -import org.apache.lucene.search.payloads.PayloadTermQuery; +import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity.SimScorer; -import org.apache.lucene.search.spans.SpanWeight; -import org.apache.lucene.search.spans.TermSpans; -import org.apache.lucene.util.Bits; +import org.apache.lucene.search.similarities.Similarity.SimWeight; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.ToStringUtils; import java.io.IOException; - -import static 
org.apache.lucene.analysis.payloads.PayloadHelper.decodeFloat; +import java.util.Set; /** * A term query that takes all payload boost values into account. @@ -49,78 +54,12 @@ import static org.apache.lucene.analysis.payloads.PayloadHelper.decodeFloat; * determine how the payload should be factored in, it just parses * the float and multiplies the average with the regular score. */ -public final class AllTermQuery extends PayloadTermQuery { +public final class AllTermQuery extends Query { + + private final Term term; public AllTermQuery(Term term) { - super(term, new AveragePayloadFunction()); - } - - @Override - public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - // TODO: needsScores - // we should be able to just return a regular SpanTermWeight, at most here if needsScores == false? - return new AllTermWeight(this, searcher, needsScores); - } - - class AllTermWeight extends PayloadTermWeight { - - AllTermWeight(AllTermQuery query, IndexSearcher searcher, boolean needsScores) throws IOException { - super(query, searcher, needsScores); - } - - @Override - public AllTermSpanScorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - if (this.stats == null) { - return null; - } - // we have a custom weight class, we must check in case something is wrong with _all - Terms terms = context.reader().terms(query.getField()); - if (terms != null && terms.hasPositions() == false) { - throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run AllTermQuery (term=" + term.text() + ")"); - } - TermSpans spans = (TermSpans) query.getSpans(context, acceptDocs, termContexts); - if (spans == null) { - return null; - } - SimScorer sloppySimScorer = similarity.simScorer(stats, context); - return new AllTermSpanScorer(spans, this, sloppySimScorer); - } - - class AllTermSpanScorer extends PayloadTermSpanScorer { - final PostingsEnum postings; - - AllTermSpanScorer(TermSpans spans, SpanWeight weight, Similarity.SimScorer docScorer) throws IOException { - super(spans, weight, docScorer); - postings = spans.getPostings(); - } - - @Override - protected void processPayload(Similarity similarity) throws IOException { - // note: similarity is ignored here (we just use decodeFloat always). - // this is the only difference between this class and PayloadTermQuery. 
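
With `PayloadTermQuery` and its span-scorer machinery gone, the rewritten `AllTermQuery` below computes the boost directly from a `PostingsEnum` opened with `PostingsEnum.PAYLOADS`. A self-contained sketch of that per-document averaging, mirroring the `payloadBoost()` logic in the replacement scorer (the class and method names here are illustrative):

----------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.BytesRef;

final class PayloadBoostSketch {
    /**
     * Averages the float payloads of the current document; a position
     * without a payload contributes the neutral boost 1.0f.
     */
    static float averagePayloadBoost(PostingsEnum postings) throws IOException {
        final int freq = postings.freq();
        float sum = 0f;
        for (int i = 0; i < freq; ++i) {
            postings.nextPosition();
            final BytesRef payload = postings.getPayload();
            sum += payload == null
                    ? 1f
                    : PayloadHelper.decodeFloat(payload.bytes, payload.offset);
        }
        return sum / freq;
    }
}
----------------------------------------------------------------------------
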
- if (spans.isPayloadAvailable()) { - BytesRef payload = postings.getPayload(); - payloadScore += decodeFloat(payload.bytes, payload.offset); - payloadsSeen++; - } - } - } - } - - @Override - public int hashCode() { - return super.hashCode() + 1; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - return true; + this.term = term; } @Override @@ -150,4 +89,144 @@ public final class AllTermQuery extends PayloadTermQuery { return this; } + @Override + public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + if (needsScores == false) { + return new TermQuery(term).createWeight(searcher, needsScores); + } + final TermContext termStates = TermContext.build(searcher.getTopReaderContext(), term); + final CollectionStatistics collectionStats = searcher.collectionStatistics(term.field()); + final TermStatistics termStats = searcher.termStatistics(term, termStates); + final Similarity similarity = searcher.getSimilarity(needsScores); + final SimWeight stats = similarity.computeWeight(getBoost(), collectionStats, termStats); + return new Weight(this) { + + @Override + public final float getValueForNormalization() throws IOException { + return stats.getValueForNormalization(); + } + + @Override + public final void normalize(float norm, float topLevelBoost) { + stats.normalize(norm, topLevelBoost); + } + + @Override + public void extractTerms(Set terms) { + terms.add(term); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + AllTermScorer scorer = scorer(context); + if (scorer != null) { + int newDoc = scorer.advance(doc); + if (newDoc == doc) { + float score = scorer.score(); + float freq = scorer.freq(); + SimScorer docScorer = similarity.simScorer(stats, context); + Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq); + Explanation termScoreExplanation = docScorer.explain(doc, freqExplanation); + Explanation payloadBoostExplanation = Explanation.match(scorer.payloadBoost(), "payloadBoost=" + scorer.payloadBoost()); + return Explanation.match( + score, + "weight(" + getQuery() + " in " + doc + ") [" + + similarity.getClass().getSimpleName() + "], product of:", + termScoreExplanation, payloadBoostExplanation); + } + } + return Explanation.noMatch("no matching term"); + } + + @Override + public AllTermScorer scorer(LeafReaderContext context) throws IOException { + final Terms terms = context.reader().terms(term.field()); + if (terms == null) { + return null; + } + final TermsEnum termsEnum = terms.iterator(); + if (termsEnum == null) { + return null; + } + final TermState state = termStates.get(context.ord); + termsEnum.seekExact(term.bytes(), state); + PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS); + assert docs != null; + return new AllTermScorer(this, docs, similarity.simScorer(stats, context)); + } + + }; + } + + private static class AllTermScorer extends Scorer { + + final PostingsEnum postings; + final Similarity.SimScorer docScorer; + int doc = -1; + float payloadBoost; + + AllTermScorer(Weight weight, PostingsEnum postings, Similarity.SimScorer docScorer) { + super(weight); + this.postings = postings; + this.docScorer = docScorer; + } + + float payloadBoost() throws IOException { + if (doc != docID()) { + final int freq = postings.freq(); + payloadBoost = 0; + for (int i = 0; i < freq; ++i) { + postings.nextPosition(); + final 
BytesRef payload = postings.getPayload(); + float boost; + if (payload == null) { + boost = 1; + } else { + assert payload.length == 4; + boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset); + } + payloadBoost += boost; + } + payloadBoost /= freq; + doc = docID(); + } + return payloadBoost; + } + + @Override + public float score() throws IOException { + return payloadBoost() * docScorer.score(postings.docID(), postings.freq()); + } + + @Override + public int freq() throws IOException { + return postings.freq(); + } + + @Override + public int docID() { + return postings.docID(); + } + + @Override + public int nextDoc() throws IOException { + return postings.nextDoc(); + } + + @Override + public int advance(int target) throws IOException { + return postings.advance(target); + } + + @Override + public long cost() { + return postings.cost(); + } + } + + @Override + public String toString(String field) { + return new TermQuery(term).toString(field) + ToStringUtils.boost(getBoost()); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 8e8c47c4614..47ed0dbe3f4 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -25,11 +25,12 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.FilteredDocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitDocIdSet; +import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; @@ -96,25 +97,32 @@ public class FilterableTermsEnum extends TermsEnum { if (termsEnum == null) { continue; } - Bits bits = null; + BitSet bits = null; if (weight != null) { - // we want to force apply deleted docs - Scorer docs = weight.scorer(context, context.reader().getLiveDocs()); + DocIdSetIterator docs = weight.scorer(context); if (docs == null) { // fully filtered, none matching, no need to iterate on this continue; } + // we want to force apply deleted docs + final Bits liveDocs = context.reader().getLiveDocs(); + if (liveDocs != null) { + docs = new FilteredDocIdSetIterator(docs) { + @Override + protected boolean match(int doc) { + return liveDocs.get(doc); + } + }; + } + BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc()); builder.or(docs); bits = builder.build().bits(); // Count how many docs are in our filtered set // TODO make this lazy-loaded only for those that need it? 
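
Since `Weight#scorer` no longer takes an `acceptDocs` parameter, deleted documents are filtered explicitly here by wrapping the iterator before its matches are materialized into a `BitSet`; the filtered document count then falls out of `cardinality()` instead of the second scorer pass being removed below. A sketch of that idiom under the same assumptions (the class name is illustrative):

----------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FilteredDocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;

final class LiveDocsFilterSketch {
    /** Collects the matches of {@code docs} into a BitSet, skipping deleted docs. */
    static BitSet collectLiveMatches(DocIdSetIterator docs, final Bits liveDocs,
                                     int maxDoc) throws IOException {
        if (liveDocs != null) {
            // wrap the iterator so deleted documents never reach the builder
            docs = new FilteredDocIdSetIterator(docs) {
                @Override
                protected boolean match(int doc) {
                    return liveDocs.get(doc);
                }
            };
        }
        BitDocIdSet.Builder builder = new BitDocIdSet.Builder(maxDoc);
        builder.or(docs);
        BitSet bits = builder.build().bits();
        // the per-segment doc count is now simply bits.cardinality()
        return bits;
    }
}
----------------------------------------------------------------------------
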
- docs = weight.scorer(context, context.reader().getLiveDocs()); - while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { - numDocs++; - } + numDocs += bits.cardinality(); } enums.add(new Holder(termsEnum, bits)); } @@ -147,10 +155,13 @@ public class FilterableTermsEnum extends TermsEnum { totalTermFreq += leafTotalTermFreq; } } else { - final PostingsEnum docsEnum = anEnum.docsEnum = anEnum.termsEnum.postings(anEnum.bits, anEnum.docsEnum, docsEnumFlag); + final PostingsEnum docsEnum = anEnum.docsEnum = anEnum.termsEnum.postings(anEnum.docsEnum, docsEnumFlag); // 2 choices for performing same heavy loop - one attempts to calculate totalTermFreq and other does not if (docsEnumFlag == PostingsEnum.FREQS) { for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { + if (anEnum.bits != null && anEnum.bits.get(docId) == false) { + continue; + } docFreq++; // docsEnum.freq() returns 1 if doc indexed with IndexOptions.DOCS_ONLY so no way of knowing if value // is really 1 or unrecorded when filtering like this @@ -158,6 +169,9 @@ public class FilterableTermsEnum extends TermsEnum { } } else { for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { + if (anEnum.bits != null && anEnum.bits.get(docId) == false) { + continue; + } // docsEnum.freq() behaviour is undefined if docsEnumFlag==PostingsEnum.FLAG_NONE so don't bother with call docFreq++; } @@ -204,7 +218,7 @@ public class FilterableTermsEnum extends TermsEnum { } @Override - public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException { + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index b1c1b87fd3a..5bb92235044 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -44,7 +44,7 @@ public class FilteredCollector implements Collector { @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { - final Scorer filterScorer = filter.scorer(context, null); + final Scorer filterScorer = filter.scorer(context); final LeafCollector in = collector.getLeafCollector(context); final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java index 1a5d2687565..6bbd97bfccb 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java @@ -166,7 +166,7 @@ public class MoreLikeThisQuery extends Query { BooleanQuery bq = new BooleanQuery(); if (this.likeFields != null) { Query mltQuery = mlt.like(this.likeFields); - Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch); + mltQuery = Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch); bq.add(mltQuery, BooleanClause.Occur.SHOULD); } if (this.likeText != null) { @@ -176,7 +176,7 @@ public class MoreLikeThisQuery extends Query { } //LUCENE 4 UPGRADE this mapps the 3.6 behavior (only 
use the first field) Query mltQuery = mlt.like(moreLikeFields[0], readers); - Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch); + mltQuery = Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch); bq.add(mltQuery, BooleanClause.Occur.SHOULD); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 19b94fc6d72..9e49f79921d 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -107,9 +107,9 @@ public class Queries { return false; } - public static void applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) { + public static BooleanQuery applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) { if (minimumShouldMatch == null) { - return; + return query; } int optionalClauses = 0; for (BooleanClause c : query.clauses()) { @@ -120,8 +120,17 @@ public class Queries { int msm = calculateMinShouldMatch(optionalClauses, minimumShouldMatch); if (0 < msm) { - query.setMinimumNumberShouldMatch(msm); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.setDisableCoord(query.isCoordDisabled()); + for (BooleanClause clause : query) { + builder.add(clause); + } + builder.setMinimumNumberShouldMatch(msm); + BooleanQuery bq = builder.build(); + bq.setBoost(query.getBoost()); + query = bq; } + return query; } private static Pattern spaceAroundLessThanPattern = Pattern.compile("(\\s+<\\s*)|(\\s*<\\s+)"); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java index 4999f2a7cf4..4275647df0a 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java @@ -852,7 +852,7 @@ public final class XMoreLikeThis { continue; } - final PostingsEnum docs = termsEnum.postings(null, null); + final PostingsEnum docs = termsEnum.postings(null); int freq = 0; while(docs != null && docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { freq += docs.freq(); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 488c5fec603..cb2babb574f 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -54,7 +55,13 @@ public class FieldValueFactorFunction extends ScoreFunction { @Override public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) { - final SortedNumericDoubleValues values = this.indexFieldData.load(ctx).getDoubleValues(); + final SortedNumericDoubleValues values; + if(indexFieldData == null) { + values = 
FieldData.emptySortedNumericDoubles(ctx.reader().maxDoc()); + } else { + values = this.indexFieldData.load(ctx).getDoubleValues(); + } + return new LeafScoreFunction() { @Override diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index 91e93bec943..e95da1d8731 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -169,11 +169,11 @@ public class FiltersFunctionScoreQuery extends Query { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { // we ignore scoreDocsInOrder parameter, because we need to score in // order if documents are scored with a script. The // ShardLookup depends on in order scoring. - Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs); + Scorer subQueryScorer = subQueryWeight.scorer(context); if (subQueryScorer == null) { return null; } @@ -182,7 +182,7 @@ public class FiltersFunctionScoreQuery extends Query { for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; functions[i] = filterFunction.function.getLeafScoreFunction(context); - Scorer filterScorer = filterWeights[i].scorer(context, null); // no need to apply accepted docs + Scorer filterScorer = filterWeights[i].scorer(context); docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); } return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore, needsScores); @@ -208,7 +208,7 @@ public class FiltersFunctionScoreQuery extends Query { } Bits docSet = Lucene.asSequentialAccessBits(context.reader().maxDoc(), - filterWeights[i].scorer(context, null)); + filterWeights[i].scorer(context)); if (docSet.get(doc)) { Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index b3ad83e4d21..448eda8154c 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -128,8 +128,8 @@ public class FunctionScoreQuery extends Query { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + Scorer subQueryScorer = subQueryWeight.scorer(context); if (subQueryScorer == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java index bfde845f299..85bb5fe8904 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java @@ 
-105,9 +105,13 @@ final class PerThreadIDAndVersionLookup { // Use NDV to retrieve the version, in which case we only need PostingsEnum: // there may be more than one matching docID, in the case of nested docs, so we want the last one: - PostingsEnum docs = docsEnums[seg] = termsEnums[seg].postings(liveDocs[seg], docsEnums[seg], 0); + PostingsEnum docs = docsEnums[seg] = termsEnums[seg].postings(docsEnums[seg], 0); + final Bits liveDocs = this.liveDocs[seg]; int docID = DocIdSetIterator.NO_MORE_DOCS; for (int d = docs.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docs.nextDoc()) { + if (liveDocs != null && liveDocs.get(d) == false) { + continue; + } docID = d; } @@ -125,9 +129,13 @@ final class PerThreadIDAndVersionLookup { } // ... but used to be stored as payloads; in this case we must use PostingsEnum - PostingsEnum dpe = posEnums[seg] = termsEnums[seg].postings(liveDocs[seg], posEnums[seg], PostingsEnum.PAYLOADS); + PostingsEnum dpe = posEnums[seg] = termsEnums[seg].postings(posEnums[seg], PostingsEnum.PAYLOADS); assert dpe != null; // terms has payloads + final Bits liveDocs = this.liveDocs[seg]; for (int d = dpe.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = dpe.nextDoc()) { + if (liveDocs != null && liveDocs.get(d) == false) { + continue; + } dpe.nextPosition(); final BytesRef payload = dpe.getPayload(); if (payload != null && payload.length == 8) { diff --git a/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java index 82ed6f2bde8..b26039141c2 100644 --- a/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java +++ b/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -86,7 +85,7 @@ public class MultiDataPathUpgrader { ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath()); Files.createDirectories(targetPath.resolveIndex()); try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) { - try (final Lock lock = Lucene.acquireWriteLock(directory)) { + try (final Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths); } catch (LockObtainFailedException ex) { throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex); diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java index 38953c51b02..2278220d9dd 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java @@ -19,10 +19,10 @@ package org.elasticsearch.common.util.concurrent; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchGenerationException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReferenceArray; @@ -93,7 +93,7 @@ public class 
AtomicArray { public List> asList() { if (nonNullList == null) { if (array == null || array.length() == 0) { - nonNullList = ImmutableList.of(); + nonNullList = Collections.emptyList(); } else { List> list = new ArrayList<>(array.length()); for (int i = 0; i < array.length(); i++) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java index 2748b4b5097..b70a1ae9365 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java @@ -23,7 +23,6 @@ import com.fasterxml.jackson.core.Base64Variant; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.SerializableString; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.json.BaseJsonGenerator; @@ -34,6 +33,8 @@ import java.io.OutputStream; import java.math.BigDecimal; import java.math.BigInteger; import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Queue; @@ -61,7 +62,7 @@ public class FilteringJsonGenerator extends BaseJsonGenerator { public FilteringJsonGenerator(JsonGenerator generator, String[] filters) { super(generator); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); if (filters != null) { for (String filter : filters) { String[] matcher = Strings.delimitedListToStringArray(filter, "."); @@ -72,7 +73,7 @@ public class FilteringJsonGenerator extends BaseJsonGenerator { } // Creates a root context that matches all filtering rules - this.context = get(null, null, builder.build()); + this.context = get(null, null, Collections.unmodifiableList(builder)); } /** diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index e4cdeb94db7..c75e26fbf36 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -30,13 +30,11 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; @@ -154,7 +152,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); try { - locks[dirIndex] = Lucene.acquireLock(luceneDir, NODE_LOCK_FILENAME, 0); + locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME); nodePaths[dirIndex] = new NodePath(dir, environment); localNodeId = 
possibleLockId; } catch (LockObtainFailedException ex) { @@ -324,7 +322,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings)); // create a lock for the "write.lock" file try { - locks[i] = Lucene.acquireWriteLock(dirs[i]); + locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME); } catch (IOException ex) { throw new LockObtainFailedException("unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p); @@ -730,7 +728,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { if (!closed.get() && locks != null) { for (Lock lock : locks) { try { - assert lock.isLocked() : "Lock: " + lock + "is not locked"; + lock.ensureValid(); } catch (IOException e) { logger.warn("lock assertion failed", e); return false; diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 869f2dc3e2f..1ccdd55dad7 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.NodeEnvironment; +import java.util.ArrayList; +import java.util.Collections; import java.util.Map; import java.util.Set; @@ -139,7 +141,7 @@ public class DanglingIndicesState extends AbstractComponent { return; } try { - allocateDangledIndices.allocateDangled(ImmutableList.copyOf(danglingIndices.values()), new LocalAllocateDangledIndices.Listener() { + allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())), new LocalAllocateDangledIndices.Listener() { @Override public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { logger.trace("allocated dangled"); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java index a58b0f20cf8..d97b22d18cb 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -46,6 +46,7 @@ import org.apache.lucene.analysis.hu.HungarianAnalyzer; import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.apache.lucene.analysis.it.ItalianAnalyzer; +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.apache.lucene.analysis.no.NorwegianAnalyzer; @@ -145,6 +146,7 @@ public class Analysis { .put("_irish_", IrishAnalyzer.getDefaultStopSet()) .put("_italian_", ItalianAnalyzer.getDefaultStopSet()) .put("_latvian_", LatvianAnalyzer.getDefaultStopSet()) + .put("_lithuanian_", LithuanianAnalyzer.getDefaultStopSet()) .put("_norwegian_", NorwegianAnalyzer.getDefaultStopSet()) .put("_persian_", PersianAnalyzer.getDefaultStopSet()) .put("_portuguese_", PortugueseAnalyzer.getDefaultStopSet()) diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java index 21667cf6128..393f1c96317 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java +++ 
b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java
@@ -492,6 +492,7 @@ public class AnalysisModule extends AbstractModule {
         analyzersBindings.processAnalyzer("irish", IrishAnalyzerProvider.class);
         analyzersBindings.processAnalyzer("italian", ItalianAnalyzerProvider.class);
         analyzersBindings.processAnalyzer("latvian", LatvianAnalyzerProvider.class);
+        analyzersBindings.processAnalyzer("lithuanian", LithuanianAnalyzerProvider.class);
         analyzersBindings.processAnalyzer("norwegian", NorwegianAnalyzerProvider.class);
         analyzersBindings.processAnalyzer("persian", PersianAnalyzerProvider.class);
         analyzersBindings.processAnalyzer("portuguese", PortugueseAnalyzerProvider.class);
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java
new file mode 100644
index 00000000000..bac14a479c9
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.lt.LithuanianAnalyzer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+
+/**
+ * Provider for {@link LithuanianAnalyzer}
+ */
+public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<LithuanianAnalyzer> {
+
+    private final LithuanianAnalyzer analyzer;
+
+    @Inject
+    public LithuanianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+        super(index, indexSettings, name, settings);
+        analyzer = new LithuanianAnalyzer(Analysis.parseStopWords(env, settings, LithuanianAnalyzer.getDefaultStopSet()),
+                Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer.setVersion(version);
+    }
+
+    @Override
+    public LithuanianAnalyzer get() {
+        return this.analyzer;
+    }
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java
index f3a50390667..25ff8f96834 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java
@@ -22,6 +22,8 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
 
+import java.util.Objects;
+
 /**
  * Named analyzer is an analyzer wrapper around an actual analyzer ({@link #analyzer} that is associated
  * with a name ({@link #name()}.
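The provider above registers Lucene's LithuanianAnalyzer under the `lithuanian` analyzer name. As a rough, self-contained sketch of what the wrapped analyzer emits (the field name and sample text are made up; only the Lucene analysis API is used):

----------------------------------------------------------------------
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.lt.LithuanianAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class LithuanianAnalyzerSketch {
    public static void main(String[] args) throws Exception {
        try (LithuanianAnalyzer analyzer = new LithuanianAnalyzer()) {
            // tokenStream() runs the full chain: tokenizer, lowercasing,
            // the default Lithuanian stopword set, and the Snowball stemmer.
            try (TokenStream ts = analyzer.tokenStream("body", "Lietuvos universitetuose")) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    System.out.println(term.toString());
                }
                ts.end();
            }
        }
    }
}
----------------------------------------------------------------------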
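Further up, the NodeEnvironment hunks move off the removed `Lucene.acquireLock`/`Lock.isLocked` helpers onto the simplified Lucene 5.3 locking API: the `Directory` now hands out the lock directly and `ensureValid()` replaces the old held-lock assertion. A minimal sketch of that calling convention (the path and lock name are illustrative):

----------------------------------------------------------------------
import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public class NodeLockSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/node-0"))) {
            // obtainLock() either returns a held lock or throws; there is no
            // separate acquire-with-retries helper to go through anymore.
            try (Lock lock = dir.obtainLock("node.lock")) {
                // ensureValid() throws IOException once the lock is no longer
                // held, replacing the old isLocked() assertion.
                lock.ensureValid();
            } catch (LockObtainFailedException e) {
                System.err.println("another process holds the lock: " + e);
            }
        }
    }
}
----------------------------------------------------------------------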
@@ -104,4 +106,17 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { throw new IllegalStateException("NamedAnalyzer cannot be wrapped with a wrapper, only a delegator"); } }; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof NamedAnalyzer)) return false; + NamedAnalyzer that = (NamedAnalyzer) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java index 1b2018f07b7..84ebe4087af 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java @@ -185,6 +185,9 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { } else if ("latvian".equalsIgnoreCase(language)) { return new LatvianStemFilter(tokenStream); + } else if ("lithuanian".equalsIgnoreCase(language)) { + return new SnowballFilter(tokenStream, new LithuanianStemmer()); + // Norwegian (Bokmål) stemmers } else if ("norwegian".equalsIgnoreCase(language)) { return new SnowballFilter(tokenStream, new NorwegianStemmer()); diff --git a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java index 2ba4aeb4c9d..aa29f79ba77 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -22,8 +22,8 @@ package org.elasticsearch.index.codec; import com.google.common.collect.ImmutableMap; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -65,8 +65,8 @@ public class CodecService extends AbstractIndexComponent { this.mapperService = mapperService; MapBuilder codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene50Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene50Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene53Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene53Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); diff --git a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index b8f1276d23d..b8e44bdadb6 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -21,11 +21,10 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; +import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.elasticsearch.common.logging.ESLogger; import 
org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.CompletionFieldMapper; @@ -39,7 +38,7 @@ import org.elasticsearch.index.mapper.core.CompletionFieldMapper; * configured for a specific field the default postings format is used. */ // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version -public class PerFieldMappingPostingFormatCodec extends Lucene50Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene53Codec { private final ESLogger logger; private final MapperService mapperService; diff --git a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java b/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java index 71ff9e27dbf..9b29c9cc815 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java +++ b/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java @@ -323,8 +323,8 @@ public class BloomFilterPostingsFormat extends PostingsFormat { @Override - public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException { - return getDelegate().postings(liveDocs, reuse, flags); + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return getDelegate().postings(reuse, flags); } } @@ -384,7 +384,7 @@ public class BloomFilterPostingsFormat extends PostingsFormat { bloomFilters.put(fieldInfo, bloomFilter); } // Make sure there's at least one doc for this term: - postings = termsEnum.postings(null, postings, 0); + postings = termsEnum.postings(postings, 0); if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { bloomFilter.put(term); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 91725899c17..6e6b0cfda69 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.engine; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; @@ -30,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.indexing.ShardIndexingService; @@ -56,7 +54,6 @@ public final class EngineConfig { private volatile ByteSizeValue indexingBufferSize; private volatile ByteSizeValue versionMapSize; private volatile String versionMapSizeSetting; - private final int indexConcurrency; private volatile boolean compoundOnFlush = true; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); private volatile boolean enableGcDeletes = true; @@ -79,13 +76,6 @@ public final class EngineConfig { private final QueryCachingPolicy queryCachingPolicy; 
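The codec bump from `Lucene50Codec` to `Lucene53Codec` above is mechanical, but it shows why `PerFieldMappingPostingFormatCodec` carries the `LUCENE UPGRADE` reminder: the per-field codec works by subclassing the current default codec and overriding its per-field hook, so it has to be rebased on every upgrade. A hedged sketch of the pattern (the routed field name is made up, and `MemoryPostingsFormat` is just a stand-in for any alternate format):

----------------------------------------------------------------------
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene53.Lucene53Codec;
import org.apache.lucene.codecs.memory.MemoryPostingsFormat;

// Sketch of the per-field pattern: everything delegates to the current
// default codec except fields we explicitly route elsewhere.
public class PerFieldSketchCodec extends Lucene53Codec {
    private final PostingsFormat memory = new MemoryPostingsFormat();

    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
        if ("suggest_field".equals(field)) {    // illustrative field name
            return memory;
        }
        return super.getPostingsFormatForField(field);
    }
}
----------------------------------------------------------------------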
private final IndexSearcherWrappingService wrappingService; - /** - * Index setting for index concurrency / number of threadstates in the indexwriter. - * The default is depending on the number of CPUs in the system. We use a 0.65 the number of CPUs or at least {@value org.apache.lucene.index.IndexWriterConfig#DEFAULT_MAX_THREAD_STATES} - * This setting is not realtime updateable - */ - public static final String INDEX_CONCURRENCY_SETTING = "index.index_concurrency"; - /** * Index setting for compound file on flush. This setting is realtime updateable. */ @@ -161,7 +151,6 @@ public final class EngineConfig { this.wrappingService = wrappingService; this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false); this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush); - this.indexConcurrency = indexSettings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65))); codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME); indexingBufferSize = indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAULT_INDEX_BUFFER_SIZE); gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis(); @@ -235,16 +224,6 @@ public final class EngineConfig { return indexingBufferSize; } - /** - * Returns the index concurrency that directly translates into the number of thread states used in the engines - * {@code IndexWriter}. - * - * @see org.apache.lucene.index.IndexWriterConfig#getMaxThreadStates() - */ - public int getIndexConcurrency() { - return indexConcurrency; - } - /** * Returns true iff flushed segments should be written as compound file system. Defaults to true */ diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5bd733c48f1..b32a5e06321 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -136,7 +136,7 @@ public class InternalEngine extends Engine { this.indexingService = engineConfig.getIndexingService(); this.warmer = engineConfig.getWarmer(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig()); - this.dirtyLocks = new Object[engineConfig.getIndexConcurrency() * 50]; // we multiply it to have enough... + this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough... 
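`index.index_concurrency` goes away here because Lucene 5.3 dropped the bounded thread-state pool: `IndexWriterConfig.setMaxThreadStates` no longer exists, and per-thread writer state is allocated on demand, leaving the RAM buffer as the main sizing knob. A minimal sketch of the slimmed-down configuration (the analyzer choice and buffer size are illustrative):

----------------------------------------------------------------------
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;

public class WriterConfigSketch {
    static IndexWriterConfig newConfig() {
        IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
        // The RAM buffer is still tunable; the former setMaxThreadStates(int)
        // knob is gone, so indexing threads get writer state on demand.
        iwc.setRAMBufferSizeMB(64.0);
        return iwc;
    }
}
----------------------------------------------------------------------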
for (int i = 0; i < dirtyLocks.length; i++) { dirtyLocks[i] = new Object(); } @@ -1038,7 +1038,6 @@ public class InternalEngine extends Engine { iwc.setMergePolicy(mergePolicy); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac()); - iwc.setMaxThreadStates(engineConfig.getIndexConcurrency()); iwc.setCodec(engineConfig.getCodec()); /* We set this timeout to a highish value to work around * the default poll interval in the Lucene lock that is diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 254b8b344e9..da120657806 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.fielddata; -import com.google.common.collect.ImmutableList; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; @@ -30,6 +29,7 @@ import org.joda.time.DateTimeZone; import org.joda.time.MutableDateTime; import java.util.AbstractList; +import java.util.Collections; import java.util.List; @@ -85,7 +85,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } @Override @@ -128,7 +128,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } public MutableDateTime getDate() { @@ -175,7 +175,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } @Override @@ -238,7 +238,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index 5fe9a4c388d..fa7eef6e6b2 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -470,7 +470,7 @@ public final class OrdinalsBuilder implements Closeable { public BytesRef next() throws IOException { BytesRef ref; if ((ref = termsEnum.next()) != null) { - docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.NONE); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); nextOrdinal(); int docId; while ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index d5ef33ca82e..dce5e403e2a 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -97,7 +97,7 @@ public class PagedBytesIndexFieldData extends AbstractIndexOrdinalsFieldData { final long termOrd = builder.nextOrdinal(); assert termOrd == termOrdToBytesOffset.size(); termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term)); - docsEnum = 
termsEnum.postings(null, docsEnum, PostingsEnum.NONE); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { builder.addDoc(docId); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index 1b4b2d5dd67..ae7d4986e47 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -194,7 +194,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData 0; if (size == 1) { // Can't use 'reuse' since we don't know to which previous TermsEnum it belonged to. - return states.get(stateSlots.get(0)).termsEnum.postings(liveDocs, null, flags); + return states.get(stateSlots.get(0)).termsEnum.postings(null, flags); } else { List docsEnums = new ArrayList<>(stateSlots.size()); for (int i = 0; i < stateSlots.size(); i++) { - docsEnums.add(states.get(stateSlots.get(i)).termsEnum.postings(liveDocs, null, flags)); + docsEnums.add(states.get(stateSlots.get(i)).termsEnum.postings(null, flags)); } return new CompoundDocsEnum(docsEnums); } diff --git a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 632284d2eb8..8331e0af7fe 100644 --- a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.get; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import org.apache.lucene.index.Term; @@ -49,6 +48,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -258,7 +258,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { if (value instanceof List) { fields.put(field, new GetField(field, (List) value)); } else { - fields.put(field, new GetField(field, ImmutableList.of(value))); + fields.put(field, new GetField(field, Collections.singletonList(value))); } } } @@ -383,7 +383,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { if (value instanceof List) { fields.put(field, new GetField(field, (List) value)); } else { - fields.put(field, new GetField(field, ImmutableList.of(value))); + fields.put(field, new GetField(field, Collections.singletonList(value))); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 80f2603cf32..ff90dd7af98 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -23,6 +23,7 @@ import com.google.common.base.Function; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; @@ -199,7 +200,7 @@ public class DocumentMapper implements ToXContent { List newFieldMappers = new ArrayList<>(); 
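The `postings()` changes in the surrounding hunks all follow from the same Lucene 5.3 break: `TermsEnum.postings(PostingsEnum, int)` lost its `Bits liveDocs` parameter, so enumerations now include deleted documents and any caller that cares must consult the leaf's live docs itself. A minimal sketch of the new convention (the field and term are made up):

----------------------------------------------------------------------
import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

public class PostingsSketch {
    static void visitLiveDocs(LeafReader reader) throws IOException {
        Terms terms = reader.terms("body");              // illustrative field
        if (terms == null) {
            return;
        }
        TermsEnum te = terms.iterator();
        if (te.seekExact(new BytesRef("elasticsearch")) == false) {
            return;
        }
        // No liveDocs argument anymore: deleted docs come back too.
        PostingsEnum pe = te.postings(null, PostingsEnum.NONE);
        Bits liveDocs = reader.getLiveDocs();            // null means no deletions
        for (int doc = pe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = pe.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                System.out.println("live doc " + doc);
            }
        }
    }
}
----------------------------------------------------------------------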
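A separate cleanup running through this patch replaces Guava collections with JDK equivalents (`Collections.emptyList`, `singletonList`, `unmodifiableList`). Two behavioral differences are easy to miss: `singletonList` accepts null where `ImmutableList.of(value)` would throw, and `unmodifiableList` is a live view rather than a copy, so the backing list must not keep changing once the view escapes. A small illustration:

----------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class UnmodifiableSketch {
    public static void main(String[] args) {
        // singletonList tolerates null; ImmutableList.of(null) would have
        // thrown a NullPointerException at the call site.
        List<Object> maybeNull = Collections.singletonList(null);
        System.out.println(maybeNull.size());   // 1

        List<String> backing = new ArrayList<>();
        backing.add("a");
        List<String> view = Collections.unmodifiableList(backing);
        backing.add("b");                       // still visible through the view
        System.out.println(view);               // [a, b] -- a view, not a copy
    }
}
----------------------------------------------------------------------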
for (MetadataFieldMapper metadataMapper : this.mapping.metadataMappers) { if (metadataMapper instanceof FieldMapper) { - newFieldMappers.add((FieldMapper) metadataMapper); + newFieldMappers.add(metadataMapper); } } MapperUtils.collect(this.mapping.root, newObjectMappers, newFieldMappers); @@ -452,7 +453,7 @@ public class DocumentMapper implements ToXContent { public Map transformSourceAsMap(Map sourceAsMap) { try { // We use the ctx variable and the _source name to be consistent with the update api. - ExecutableScript executable = scriptService.executable(script, ScriptContext.Standard.MAPPING); + ExecutableScript executable = scriptService.executable(script, ScriptContext.Standard.MAPPING, null); Map ctx = new HashMap<>(1); ctx.put("_source", sourceAsMap); executable.setNextVar("ctx", ctx); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 5f6893ed2aa..2d2ac6c75f8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Function; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterators; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -695,9 +694,9 @@ public abstract class FieldMapper extends Mapper { */ public static class CopyTo { - private final ImmutableList copyToFields; + private final List copyToFields; - private CopyTo(ImmutableList copyToFields) { + private CopyTo(List copyToFields) { this.copyToFields = copyToFields; } @@ -713,7 +712,7 @@ public abstract class FieldMapper extends Mapper { } public static class Builder { - private final ImmutableList.Builder copyToBuilders = ImmutableList.builder(); + private final List copyToBuilders = new ArrayList<>(); public Builder add(String field) { copyToBuilders.add(field); @@ -721,7 +720,7 @@ public abstract class FieldMapper extends Mapper { } public CopyTo build() { - return new CopyTo(copyToBuilders.build()); + return new CopyTo(Collections.unmodifiableList(copyToBuilders)); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index e55f3b1dd32..9b8d4e85850 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -192,13 +192,24 @@ public abstract class MappedFieldType extends FieldType { public boolean equals(Object o) { if (!super.equals(o)) return false; MappedFieldType fieldType = (MappedFieldType) o; + // check similarity first because we need to check the name, and it might be null + // TODO: SimilarityProvider should have equals? 
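The lines that follow fall back to comparing similarities by registered name because `SimilarityProvider` has no `equals()` of its own (hence the TODO above). Restated as a standalone helper (the method itself is hypothetical; `SimilarityProvider#name()` is the accessor the hunk itself uses):

----------------------------------------------------------------------
import java.util.Objects;
import org.elasticsearch.index.similarity.SimilarityProvider;

public class SimilarityEqualsSketch {
    // Hypothetical helper restating the check below: equal when both are
    // null, or both non-null with the same registered name.
    static boolean sameSimilarity(SimilarityProvider a, SimilarityProvider b) {
        if (a == null || b == null) {
            return a == b;
        }
        return Objects.equals(a.name(), b.name());
    }
}
----------------------------------------------------------------------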
+ if (similarity == null || fieldType.similarity == null) { + if (similarity != fieldType.similarity) { + return false; + } + } else { + if (Objects.equals(similarity.name(), fieldType.similarity.name()) == false) { + return false; + } + } + return boost == fieldType.boost && docValues == fieldType.docValues && Objects.equals(names, fieldType.names) && Objects.equals(indexAnalyzer, fieldType.indexAnalyzer) && Objects.equals(searchAnalyzer, fieldType.searchAnalyzer) && Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) && - Objects.equals(similarity, fieldType.similarity) && Objects.equals(normsLoading, fieldType.normsLoading) && Objects.equals(fieldDataType, fieldType.fieldDataType) && Objects.equals(nullValue, fieldType.nullValue) && @@ -207,10 +218,11 @@ public abstract class MappedFieldType extends FieldType { @Override public int hashCode() { - return Objects.hash(super.hashCode(), names, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, similarity, normsLoading, fieldDataType, nullValue, nullValueAsString); + return Objects.hash(super.hashCode(), names, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, + similarity == null ? null : similarity.name(), normsLoading, fieldDataType, nullValue, nullValueAsString); } -// norelease: we need to override freeze() and add safety checks that all settings are actually set + // norelease: we need to override freeze() and add safety checks that all settings are actually set /** Returns the name of this type, as would be specified in mapping properties */ public abstract String typeName(); @@ -234,51 +246,48 @@ public abstract class MappedFieldType extends FieldType { boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE; // TODO: should be validating if index options go "up" (but "down" is ok) if (indexed != mergeWithIndexed || tokenized() != other.tokenized()) { - conflicts.add("mapper [" + names().fullName() + "] has different index values"); + conflicts.add("mapper [" + names().fullName() + "] has different [index] values"); } if (stored() != other.stored()) { - conflicts.add("mapper [" + names().fullName() + "] has different store values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store] values"); } if (hasDocValues() == false && other.hasDocValues()) { // don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitly set // when the doc_values field data format is configured - conflicts.add("mapper [" + names().fullName() + "] has different doc_values values"); + conflicts.add("mapper [" + names().fullName() + "] has different [doc_values] values, cannot change from disabled to enabled"); } if (omitNorms() && !other.omitNorms()) { - conflicts.add("mapper [" + names().fullName() + "] cannot enable norms (`norms.enabled`)"); - } - if (tokenized() != other.tokenized()) { - conflicts.add("mapper [" + names().fullName() + "] has different tokenize values"); + conflicts.add("mapper [" + names().fullName() + "] has different [omit_norms] values, cannot change from disabled to enabled"); } if (storeTermVectors() != other.storeTermVectors()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector] values"); } if (storeTermVectorOffsets() != other.storeTermVectorOffsets()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector_offsets values"); +
conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector_offsets] values"); } if (storeTermVectorPositions() != other.storeTermVectorPositions()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector_positions values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector_positions] values"); } if (storeTermVectorPayloads() != other.storeTermVectorPayloads()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector_payloads values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector_payloads] values"); } // null and "default"-named index analyzers both mean the default is used if (indexAnalyzer() == null || "default".equals(indexAnalyzer().name())) { if (other.indexAnalyzer() != null && "default".equals(other.indexAnalyzer().name()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different analyzer"); + conflicts.add("mapper [" + names().fullName() + "] has different [analyzer]"); } } else if (other.indexAnalyzer() == null || "default".equals(other.indexAnalyzer().name())) { - conflicts.add("mapper [" + names().fullName() + "] has different analyzer"); + conflicts.add("mapper [" + names().fullName() + "] has different [analyzer]"); } else if (indexAnalyzer().name().equals(other.indexAnalyzer().name()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different analyzer"); + conflicts.add("mapper [" + names().fullName() + "] has different [analyzer]"); } if (!names().indexName().equals(other.names().indexName())) { - conflicts.add("mapper [" + names().fullName() + "] has different index_name"); + conflicts.add("mapper [" + names().fullName() + "] has different [index_name]"); } if (Objects.equals(similarity(), other.similarity()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different similarity"); + conflicts.add("mapper [" + names().fullName() + "] has different [similarity]"); } if (strict) { @@ -289,11 +298,14 @@ public abstract class MappedFieldType extends FieldType { conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); } if (normsLoading() != other.normsLoading()) { - conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [norms].loading across all types."); + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [norms.loading] across all types."); } if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types."); } + if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) { + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types."); + } if (Objects.equals(fieldDataType(), other.fieldDataType()) == false) { conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. 
Set update_all_types to true to update [fielddata] across all types."); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 4300e4cc858..857b2078033 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectHashSet; import com.google.common.base.Function; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterators; @@ -31,8 +30,12 @@ import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.queries.TermsQuery; -import org.apache.lucene.search.*; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.Version; @@ -59,7 +62,13 @@ import org.elasticsearch.script.ScriptService; import java.io.Closeable; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -519,7 +528,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public Collection simpleMatchToIndexNames(String pattern) { if (Regex.isSimpleMatchPattern(pattern) == false) { // no wildcards - return ImmutableList.of(pattern); + return Collections.singletonList(pattern); } return fieldTypes.simpleMatchToIndexNames(pattern); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java deleted file mode 100644 index 52d4ea33f44..00000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.mapper; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.UnmodifiableIterator; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -/** - * A holder for several {@link org.elasticsearch.index.mapper.object.ObjectMapper}. - */ -public class ObjectMappers implements Iterable { - - private final ImmutableList objectMappers; - private final boolean hasNested; - - public ObjectMappers() { - this(ImmutableList.of()); - } - - public ObjectMappers(ObjectMapper objectMapper) { - this(new ObjectMapper[]{objectMapper}); - } - - public ObjectMappers(ObjectMapper[] objectMappers) { - this(ImmutableList.copyOf(objectMappers)); - } - - public ObjectMappers(ImmutableList objectMappers) { - this.objectMappers = objectMappers; - boolean hasNested = false; - for (ObjectMapper objectMapper : objectMappers) { - if (objectMapper.nested().isNested()) { - hasNested = true; - break; - } - } - this.hasNested = hasNested; - } - - /** - * Is one of the object mappers has a nested mapping set? - */ - public boolean hasNested() { - return this.hasNested; - } - - public ObjectMapper mapper() { - if (objectMappers.isEmpty()) { - return null; - } - return objectMappers.get(0); - } - - public boolean isEmpty() { - return objectMappers.isEmpty(); - } - - public ImmutableList mappers() { - return this.objectMappers; - } - - @Override - public UnmodifiableIterator iterator() { - return objectMappers.iterator(); - } - - /** - * Concats and returns a new {@link org.elasticsearch.index.mapper.ObjectMappers}. - */ - public ObjectMappers concat(ObjectMapper mapper) { - return new ObjectMappers(new ImmutableList.Builder().addAll(objectMappers).add(mapper).build()); - } - - /** - * Concats and returns a new {@link org.elasticsearch.index.mapper.ObjectMappers}. 
- */ - public ObjectMappers concat(ObjectMappers mappers) { - return new ObjectMappers(new ImmutableList.Builder().addAll(objectMappers).addAll(mappers).build()); - } - - public ObjectMappers remove(Iterable mappers) { - ImmutableList.Builder builder = new ImmutableList.Builder<>(); - for (ObjectMapper objectMapper : objectMappers) { - boolean found = false; - for (ObjectMapper mapper : mappers) { - if (objectMapper == mapper) { // identify equality - found = true; - } - } - if (!found) { - builder.add(objectMapper); - } - } - return new ObjectMappers(builder.build()); - } - - public ObjectMappers remove(ObjectMapper mapper) { - ImmutableList.Builder builder = new ImmutableList.Builder<>(); - for (ObjectMapper objectMapper : objectMappers) { - if (objectMapper != mapper) { // identify equality - builder.add(objectMapper); - } - } - return new ObjectMappers(builder.build()); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index d225b3f6ae6..78d038526b3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -134,6 +134,15 @@ public class BinaryFieldMapper extends FieldMapper { return CONTENT_TYPE; } + @Override + public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { + super.checkCompatibility(fieldType, conflicts, strict); + BinaryFieldType other = (BinaryFieldType)fieldType; + if (tryUncompressing() != other.tryUncompressing()) { + conflicts.add("mapper [" + names().fullName() + "] has different [try_uncompressing] (IMPOSSIBLE)"); + } + } + public boolean tryUncompressing() { return tryUncompressing; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index dab41b4a78a..7eb01fd352e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -57,6 +57,7 @@ import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.SortedMap; @@ -237,6 +238,27 @@ public class CompletionFieldMapper extends FieldMapper { this.contextMapping = ref.contextMapping; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CompletionFieldType)) return false; + if (!super.equals(o)) return false; + CompletionFieldType fieldType = (CompletionFieldType) o; + return analyzingSuggestLookupProvider.getPreserveSep() == fieldType.analyzingSuggestLookupProvider.getPreserveSep() && + analyzingSuggestLookupProvider.getPreservePositionsIncrements() == fieldType.analyzingSuggestLookupProvider.getPreservePositionsIncrements() && + analyzingSuggestLookupProvider.hasPayloads() == fieldType.analyzingSuggestLookupProvider.hasPayloads() && + Objects.equals(getContextMapping(), fieldType.getContextMapping()); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), + analyzingSuggestLookupProvider.getPreserveSep(), + analyzingSuggestLookupProvider.getPreservePositionsIncrements(), + analyzingSuggestLookupProvider.hasPayloads(), + getContextMapping()); + } + @Override public CompletionFieldType clone() { return new 
CompletionFieldType(this); @@ -252,16 +274,16 @@ public class CompletionFieldMapper extends FieldMapper { super.checkCompatibility(fieldType, conflicts, strict); CompletionFieldType other = (CompletionFieldType)fieldType; if (analyzingSuggestLookupProvider.hasPayloads() != other.analyzingSuggestLookupProvider.hasPayloads()) { - conflicts.add("mapper [" + names().fullName() + "] has different payload values"); + conflicts.add("mapper [" + names().fullName() + "] has different [payload] values"); } if (analyzingSuggestLookupProvider.getPreservePositionsIncrements() != other.analyzingSuggestLookupProvider.getPreservePositionsIncrements()) { - conflicts.add("mapper [" + names().fullName() + "] has different 'preserve_position_increments' values"); + conflicts.add("mapper [" + names().fullName() + "] has different [preserve_position_increments] values"); } if (analyzingSuggestLookupProvider.getPreserveSep() != other.analyzingSuggestLookupProvider.getPreserveSep()) { - conflicts.add("mapper [" + names().fullName() + "] has different 'preserve_separators' values"); + conflicts.add("mapper [" + names().fullName() + "] has different [preserve_separators] values"); } if(!ContextMapping.mappingsAreEqual(getContextMapping(), other.getContextMapping())) { - conflicts.add("mapper [" + names().fullName() + "] has different 'context_mapping' values"); + conflicts.add("mapper [" + names().fullName() + "] has different [context_mapping] values"); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 8addceff7e0..c958998fcf6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -350,20 +350,26 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper super.checkCompatibility(fieldType, conflicts, strict); GeoPointFieldType other = (GeoPointFieldType)fieldType; if (isLatLonEnabled() != other.isLatLonEnabled()) { - conflicts.add("mapper [" + names().fullName() + "] has different lat_lon"); + conflicts.add("mapper [" + names().fullName() + "] has different [lat_lon]"); } if (isGeohashEnabled() != other.isGeohashEnabled()) { - conflicts.add("mapper [" + names().fullName() + "] has different geohash"); + conflicts.add("mapper [" + names().fullName() + "] has different [geohash]"); } if (geohashPrecision() != other.geohashPrecision()) { - conflicts.add("mapper [" + names().fullName() + "] has different geohash_precision"); + conflicts.add("mapper [" + names().fullName() + "] has different [geohash_precision]"); } if (isGeohashPrefixEnabled() != other.isGeohashPrefixEnabled()) { - conflicts.add("mapper [" + names().fullName() + "] has different geohash_prefix"); + conflicts.add("mapper [" + names().fullName() + "] has different [geohash_prefix]"); } if (isLatLonEnabled() && other.isLatLonEnabled() && latFieldType().numericPrecisionStep() != other.latFieldType().numericPrecisionStep()) { - conflicts.add("mapper [" + names().fullName() + "] has different precision_step"); + conflicts.add("mapper [" + names().fullName() + "] has different [precision_step]"); + } + if (ignoreMalformed() != other.ignoreMalformed()) { + conflicts.add("mapper [" + names().fullName() + "] has different [ignore_malformed]"); + } + if (coerce() != other.coerce()) { + conflicts.add("mapper [" + names().fullName() + "] has different [coerce]"); } } diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 3e7aa39e42e..c28285f95a5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -280,21 +280,30 @@ public class GeoShapeFieldMapper extends FieldMapper { GeoShapeFieldType other = (GeoShapeFieldType)fieldType; // prevent user from changing strategies if (strategyName().equals(other.strategyName()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different strategy"); + conflicts.add("mapper [" + names().fullName() + "] has different [strategy]"); } // prevent user from changing trees (changes encoding) if (tree().equals(other.tree()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different tree"); + conflicts.add("mapper [" + names().fullName() + "] has different [tree]"); } // TODO we should allow this, but at the moment levels is used to build bookkeeping variables // in lucene's SpatialPrefixTree implementations, need a patch to correct that first if (treeLevels() != other.treeLevels()) { - conflicts.add("mapper [" + names().fullName() + "] has different tree_levels"); + conflicts.add("mapper [" + names().fullName() + "] has different [tree_levels]"); } if (precisionInMeters() != other.precisionInMeters()) { - conflicts.add("mapper [" + names().fullName() + "] has different precision"); + conflicts.add("mapper [" + names().fullName() + "] has different [precision]"); + } + + if (strict) { + if (orientation() != other.orientation()) { + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [orientation] across all types."); + } + if (distanceErrorPct() != other.distanceErrorPct()) { + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [distance_error_pct] across all types."); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 53c07c41309..ac2ef99c20b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -167,6 +167,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { @Override public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { + super.checkCompatibility(fieldType, conflicts, strict); if (strict) { FieldNamesFieldType other = (FieldNamesFieldType)fieldType; if (isEnabled() != other.isEnabled()) { diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index f76aab9a045..9a15f79e6b9 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -260,7 +260,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { return new MatchAllDocsQuery(); } - Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch); + booleanQuery = Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch); return adjustPureNegative ? 
fixNegativeQueryIfNeeded(booleanQuery) : booleanQuery; } diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java index ac859ec348b..a1ff2faa794 100644 --- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java @@ -19,9 +19,9 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java index 3403517d560..661214098cd 100644 --- a/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.query; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -87,7 +87,7 @@ public class IdsQueryParser extends BaseQueryParser { } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) { - types = ImmutableList.of(parser.text()); + types = Collections.singletonList(parser.text()); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else if ("_name".equals(currentFieldName)) { diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java index 5d9ba66ecc0..7997af6fd21 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java @@ -158,7 +158,7 @@ public class MatchQueryParser extends BaseQueryParserTemp { } if (query instanceof BooleanQuery) { - Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); + query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); } else if (query instanceof ExtendedCommonTermsQuery) { ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch); } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java index cf636c139d1..dcca13309cd 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java @@ -233,7 +233,7 @@ public class QueryStringQueryParser extends BaseQueryParserTemp { } query = fixNegativeQueryIfNeeded(query); if (query instanceof BooleanQuery) { - Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch()); + query = Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch()); } if (queryName != null) { context.addNamedQuery(queryName, query); diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java 
b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java index 06a3ccb7767..027f350be81 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java @@ -172,29 +172,26 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp // rewind buffer buffer.reset(); - BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef(); if (numTokens == 0) { return null; } else if (numTokens == 1) { try { boolean hasNext = buffer.incrementToken(); assert hasNext == true; - termAtt.fillBytesRef(); } catch (IOException e) { // safe to ignore, because we know the number of tokens } - return new PrefixQuery(new Term(field, BytesRef.deepCopyOf(bytes))); + return new PrefixQuery(new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()))); } else { BooleanQuery bq = new BooleanQuery(); for (int i = 0; i < numTokens; i++) { try { boolean hasNext = buffer.incrementToken(); assert hasNext == true; - termAtt.fillBytesRef(); } catch (IOException e) { // safe to ignore, because we know the number of tokens } - bq.add(new BooleanClause(new PrefixQuery(new Term(field, BytesRef.deepCopyOf(bytes))), BooleanClause.Occur.SHOULD)); + bq.add(new BooleanClause(new PrefixQuery(new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()))), BooleanClause.Occur.SHOULD)); } return bq; } diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 65727d5dbcd..9cab8998039 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -298,7 +298,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder { bq.add(new TermQuery(new Term(indexFieldName, BytesRefs.toBytesRef(term))), BooleanClause.Occur.SHOULD); } } - Queries.applyMinimumShouldMatch(bq, minimumShouldMatch); + bq = Queries.applyMinimumShouldMatch(bq, minimumShouldMatch); query = bq; } return query; diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java index a91d954c8f0..140f541ef7a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java @@ -87,11 +87,15 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { SearchContext searchContext = SearchContext.current(); MappedFieldType fieldType = searchContext.mapperService().smartNameFieldType(field); + IndexNumericFieldData fieldData = null; if (fieldType == null) { - throw new ElasticsearchException("Unable to find a field mapper for field [" + field + "]"); + if(missing == null) { + throw new ElasticsearchException("Unable to find a field mapper for field [" + field + "]. 
No 'missing' value defined."); + } + } else { + fieldData = searchContext.fieldData().getForField(fieldType); } - return new FieldValueFactorFunction(field, boostFactor, modifier, missing, - (IndexNumericFieldData)searchContext.fieldData().getForField(fieldType)); + return new FieldValueFactorFunction(field, boostFactor, modifier, missing, fieldData); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index b85dcfdb504..363e4278f8f 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -54,7 +54,7 @@ public class MultiMatchQuery extends MatchQuery { private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException { Query query = parse(type, fieldName, value); if (query instanceof BooleanQuery) { - Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); + query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); } if (boostValue != null && query != null) { query.setBoost(boostValue); diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java index 55484996232..4ec1007bbb1 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java @@ -198,13 +198,13 @@ public class ChildrenConstantScoreQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { if (remaining == 0) { return null; } if (shortCircuitFilter != null) { - DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, acceptDocs); + DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, null); if (!Lucene.isEmpty(docIdSet)) { DocIdSetIterator iterator = docIdSet.iterator(); if (iterator != null) { @@ -214,7 +214,7 @@ public class ChildrenConstantScoreQuery extends IndexCacheableQuery { return null; } - DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, acceptDocs); + DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, null); if (!Lucene.isEmpty(parentDocIdSet)) { // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining" // count down (short circuit) logic will then work as expected. 
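The same concern motivates the next two files: with `acceptDocs` gone from `Weight#scorer`, a filter-backed scorer sees deleted children unless it excludes them explicitly, which is what the new `BitsFilteredDocIdSet.wrap` calls do. A minimal sketch of that idiom:

----------------------------------------------------------------------
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;

public class LiveDocsSketch {
    // wrap() returns a DocIdSet whose iterator skips documents that are not
    // set in the live-docs bits; null live docs (no deletions) is a no-op.
    static DocIdSet liveOnly(DocIdSet candidates, LeafReaderContext context) {
        return BitsFilteredDocIdSet.wrap(candidates, context.reader().getLiveDocs());
    }
}
----------------------------------------------------------------------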
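Another pattern repeated across the query parsers above (`BoolQueryBuilder`, `MatchQueryParser`, `QueryStringQueryParser`, `SimpleQueryStringBuilder`, `MultiMatchQuery`): `Queries.applyMinimumShouldMatch` now returns the adjusted query instead of mutating its argument, so each call site must assign the result back or the constraint is silently dropped. A hedged sketch with a stand-in for the helper (the real one parses a string spec rather than taking an int):

----------------------------------------------------------------------
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;

public class MinimumShouldMatchSketch {
    // Stand-in for Queries.applyMinimumShouldMatch: builds and returns an
    // adjusted copy rather than mutating the argument (assumed behavior).
    static BooleanQuery applyMinimumShouldMatch(BooleanQuery in, int msm) {
        BooleanQuery out = new BooleanQuery();
        for (BooleanClause clause : in) {       // BooleanQuery is Iterable<BooleanClause>
            out.add(clause);
        }
        out.setMinimumNumberShouldMatch(msm);
        out.setBoost(in.getBoost());
        return out;
    }

    static BooleanQuery caller(BooleanQuery query) {
        // The fix in this patch: assign the result back. Calling the helper
        // and discarding its return value would lose the constraint.
        query = applyMinimumShouldMatch(query, 1);
        return query;
    }
}
----------------------------------------------------------------------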
diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java index c07ccbacbae..b869a4f7cb6 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java @@ -262,8 +262,8 @@ public final class ChildrenQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + DocIdSet parentsSet = parentFilter.getDocIdSet(context, null); if (Lucene.isEmpty(parentsSet) || remaining == 0) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java index bad39130e75..af764bd70e7 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java @@ -22,7 +22,17 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.Term; -import org.apache.lucene.search.*; +import org.apache.lucene.search.BitsFilteredDocIdSet; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.FilteredDocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; import org.elasticsearch.common.lucene.IndexCacheableQuery; @@ -162,14 +172,16 @@ public class ParentConstantScoreQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null); if (Lucene.isEmpty(childrenDocIdSet)) { return null; } SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType); if (globalValues != null) { + // we forcefully apply live docs here so that deleted children don't give matching parents + childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs()); DocIdSetIterator innerIterator = childrenDocIdSet.iterator(); if (innerIterator != null) { ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator( diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java b/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java index cc34da404bb..7743cfe0ab4 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java @@ -158,27 +158,25 @@ final class ParentIdsFilter extends Filter { parentIds.get(i, idSpare); BytesRef uid = 
Uid.createUidAsBytes(parentTypeBr, idSpare, uidSpare); if (termsEnum.seekExact(uid)) { + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); int docId; - docsEnum = termsEnum.postings(acceptDocs, docsEnum, PostingsEnum.NONE); - if (result == null) { - docId = docsEnum.nextDoc(); - if (docId != DocIdSetIterator.NO_MORE_DOCS) { - // very rough heuristic that tries to get an idea of the number of documents - // in the set based on the number of parent ids that we didn't find in this segment - final int expectedCardinality = size / (i + 1); - // similar heuristic to BitDocIdSet.Builder - if (expectedCardinality >= (context.reader().maxDoc() >>> 10)) { - result = new FixedBitSet(context.reader().maxDoc()); - } else { - result = new SparseFixedBitSet(context.reader().maxDoc()); - } - } else { - continue; + for (docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { + if (acceptDocs == null || acceptDocs.get(docId)) { + break; } - } else { - docId = docsEnum.nextDoc(); - if (docId == DocIdSetIterator.NO_MORE_DOCS) { - continue; + } + if (docId == DocIdSetIterator.NO_MORE_DOCS) { + continue; + } + if (result == null) { + // very rough heuristic that tries to get an idea of the number of documents + // in the set based on the number of parent ids that we didn't find in this segment + final int expectedCardinality = size / (i + 1); + // similar heuristic to BitDocIdSet.Builder + if (expectedCardinality >= (context.reader().maxDoc() >>> 10)) { + result = new FixedBitSet(context.reader().maxDoc()); + } else { + result = new SparseFixedBitSet(context.reader().maxDoc()); } } if (nonNestedDocs != null) { diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java index d574066e08d..dff42416af1 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BitsFilteredDocIdSet; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.DocIdSet; @@ -243,8 +244,10 @@ public class ParentQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null); + // we forcefully apply live docs here so that deleted children don't give matching parents + childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs()); if (Lucene.isEmpty(childrenDocSet)) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java index e951f78cd64..c590ea08301 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java @@ -141,10 +141,10 @@ public class GeoDistanceRangeQuery extends Query { } return new ConstantScoreWeight(this) { 
@Override - public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { final DocIdSetIterator approximation; if (boundingBoxWeight != null) { - approximation = boundingBoxWeight.scorer(context, null); + approximation = boundingBoxWeight.scorer(context); } else { approximation = DocIdSetIterator.all(context.reader().maxDoc()); } @@ -157,9 +157,6 @@ public class GeoDistanceRangeQuery extends Query { @Override public boolean matches() throws IOException { final int doc = approximation.docID(); - if (acceptDocs != null && acceptDocs.get(doc) == false) { - return false; - } values.setDocument(doc); final int length = values.count(); for (int i = 0; i < length; i++) { diff --git a/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java b/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java index 8b2f7a7720a..553685d0b56 100644 --- a/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java @@ -107,8 +107,8 @@ public class IncludeNestedDocsQuery extends Query { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - final Scorer parentScorer = parentWeight.scorer(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + final Scorer parentScorer = parentWeight.scorer(context); // no matches if (parentScorer == null) { diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java b/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java index e35607ac4bf..f4dcebd9d54 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.search.stats; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -35,7 +35,7 @@ public class StatsGroupsParseElement implements SearchParseElement { public void parse(XContentParser parser, SearchContext context) throws Exception { XContentParser.Token token = parser.currentToken(); if (token.isValue()) { - context.groupStats(ImmutableList.of(parser.text())); + context.groupStats(Collections.singletonList(parser.text())); } else if (token == XContentParser.Token.START_ARRAY) { List groupStats = new ArrayList<>(4); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java index bae4ae8baf5..916cf563fb8 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java @@ -19,10 +19,11 @@ package org.elasticsearch.index.shard; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.store.StoreFileMetaData; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -30,7 +31,7 @@ import 
java.util.List; */ public class CommitPoint { - public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, ImmutableList.of(), ImmutableList.of()); + public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, Collections.emptyList(), Collections.emptyList()); public static class FileInfo { private final String name; @@ -81,16 +82,16 @@ public class CommitPoint { private final Type type; - private final ImmutableList indexFiles; + private final List indexFiles; - private final ImmutableList translogFiles; + private final List translogFiles; public CommitPoint(long version, String name, Type type, List indexFiles, List translogFiles) { this.version = version; this.name = name; this.type = type; - this.indexFiles = ImmutableList.copyOf(indexFiles); - this.translogFiles = ImmutableList.copyOf(translogFiles); + this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles)); + this.translogFiles = Collections.unmodifiableList(new ArrayList<>(translogFiles)); } public long version() { diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java index ff06850feb6..19c19b4ab21 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -28,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.Iterator; import java.util.List; @@ -37,7 +37,7 @@ import java.util.List; */ public class CommitPoints implements Iterable { - private final ImmutableList commitPoints; + private final List commitPoints; public CommitPoints(List commitPoints) { CollectionUtil.introSort(commitPoints, new Comparator() { @@ -46,7 +46,7 @@ public class CommitPoints implements Iterable { return (o2.version() < o1.version() ? -1 : (o2.version() == o1.version() ? 
0 : 1)); } }); - this.commitPoints = ImmutableList.copyOf(commitPoints); + this.commitPoints = Collections.unmodifiableList(new ArrayList<>(commitPoints)); } public List commits() { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 10217983f40..0a804bf8694 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -111,6 +111,7 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; import java.util.Arrays; +import java.util.EnumSet; import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; @@ -191,6 +192,8 @@ public class IndexShard extends AbstractIndexShardComponent { private final IndexShardOperationCounter indexShardOperationCounter; + private EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); + @Inject public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, @@ -251,7 +254,7 @@ public class IndexShard extends AbstractIndexShardComponent { if (indexSettings.getAsBoolean(IndexCacheModule.QUERY_CACHE_EVERYTHING, false)) { cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE; } else { - assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1; + assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_3_0; // TODO: remove this hack in Lucene 5.4, use UsageTrackingQueryCachingPolicy directly // See https://issues.apache.org/jira/browse/LUCENE-6748 // cachingPolicy = new UsageTrackingQueryCachingPolicy(); @@ -953,8 +956,8 @@ public class IndexShard extends AbstractIndexShardComponent { public void readAllowed() throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) { - throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated"); + if (readAllowedStates.contains(state) == false) { + throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + readAllowedStates.toString()); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java index cf4a4c85d10..3895bbed2c4 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java +++ b/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java @@ -28,6 +28,92 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.settings.IndexSettingsService; +/** + * A shard in elasticsearch is a Lucene index, and a Lucene index is broken + * down into segments. Segments are internal storage elements in the index + * where the index data is stored, and are immutable up to delete markers. + * Segments are, periodically, merged into larger segments to keep the + * index size at bay and expunge deletes. + * + *

+ * <p> + * Merges select segments of approximately equal size, subject to an allowed + * number of segments per tier. The merge policy is able to merge + * non-adjacent segments, and separates how many segments are merged at once from how many + * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). + * + * <p> + * All merge policy settings are <b>dynamic</b> and can be updated on a live index. + * The merge policy has the following settings: + * + * <ul>
+ * <li><code>index.merge.policy.expunge_deletes_allowed</code>: + * + * When expungeDeletes is called, we only merge away a segment if its delete + * percentage is over this threshold. Default is <code>10</code>. + *
+ * <li><code>index.merge.policy.floor_segment</code>: + * + * Segments smaller than this are "rounded up" to this size, i.e. treated as + * equal (floor) size for merge selection. This is to prevent frequent + * flushing of tiny segments, thus preventing a long tail in the index. Default + * is <code>2mb</code>. + *
+ * <li><code>index.merge.policy.max_merge_at_once</code>: + * + * Maximum number of segments to be merged at a time during "normal" merging. + * Default is <code>10</code>. + *
+ * <li><code>index.merge.policy.max_merge_at_once_explicit</code>: + * + * Maximum number of segments to be merged at a time, during optimize or + * expungeDeletes. Default is <code>30</code>. + *
+ * <li><code>index.merge.policy.max_merged_segment</code>: + * + * Maximum sized segment to produce during normal merging (not explicit + * optimize). This setting is approximate: the estimate of the merged segment + * size is made by summing sizes of to-be-merged segments (compensating for + * percent deleted docs). Default is <code>5gb</code>. + *
+ * <li><code>index.merge.policy.segments_per_tier</code>: + * + * Sets the allowed number of segments per tier. Smaller values mean more + * merging but fewer segments. Default is <code>10</code>. Note, this value needs to be + * >= the <code>max_merge_at_once</code>, otherwise you'll force too many merges to + * occur. + *
+ * <li><code>index.merge.policy.reclaim_deletes_weight</code>: + * + * Controls how aggressively merges that reclaim more deletions are favored. + * Higher values favor selecting merges that reclaim deletions. A value of + * <code>0.0</code> means deletions don't impact merge selection. Defaults to <code>2.0</code>. + * </ul>
+ * + * <p> + * For normal merging, the policy first computes a "budget" of how many + * segments are allowed to be in the index. If the index is over-budget, + * then the policy sorts segments by decreasing size (proportionally considering percent + * deletes), and then finds the least-cost merge. Merge cost is measured by + * a combination of the "skew" of the merge (size of largest seg divided by + * smallest seg), total merge size and pct deletes reclaimed, so that + * merges with lower skew, smaller size, and those reclaiming more deletes + * are favored. + * + * <p> + * If a merge will produce a segment that's larger than + * <code>max_merged_segment</code> then the policy will merge fewer segments (down to + * 1 at once, if that one has deletions) to keep the segment size under + * budget. + * + * <p>
+ * Note, this can mean that for large shards that hold many gigabytes of + * data, the default <code>max_merged_segment</code> (5gb) can leave many + * segments in an index, making searches slower. Use the + * indices segments API to see the segments that an index has, and + * possibly either increase the <code>max_merged_segment</code> or issue an optimize + * call for the index (try to issue it at a low-traffic time). + */ + public final class MergePolicyConfig implements IndexSettingsService.Listener{ private final TieredMergePolicy mergePolicy = new TieredMergePolicy(); private final ESLogger logger; @@ -187,4 +273,4 @@ public final class MergePolicyConfig implements IndexSettingsService.Listener{ return Double.toString(ratio); } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java index 9c8aba25ee3..f061a95f2af 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java @@ -24,7 +24,30 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; /** - * + * The merge scheduler (<code>ConcurrentMergeScheduler</code>) controls the execution of + * merge operations once they are needed (according to the merge policy). Merges + * run in separate threads, and when the maximum number of threads is reached, + * further merges will wait until a merge thread becomes available. + * + *

<p>The merge scheduler supports the following <b>dynamic</b> settings: + * + * <ul>
+ * <li><code>index.merge.scheduler.max_thread_count</code>: + * + * The maximum number of threads that may be merging at once. Defaults to + * <code>Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))</code> + * which works well for a good solid-state-disk (SSD). If your index is on + * spinning platter drives instead, decrease this to 1. + *
+ * <li><code>index.merge.scheduler.auto_throttle</code>: + * + * If this is true (the default), then the merge scheduler will rate-limit IO + * (writes) for merges to an adaptive value depending on how many merges are + * requested over time. An application with a low indexing rate that + * unluckily suddenly requires a large merge will see that merge aggressively + * throttled, while an application doing heavy indexing will see the throttle + * move higher to allow merges to keep up with ongoing indexing. + * </ul>
*/ public final class MergeSchedulerConfig { diff --git a/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java b/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java index 04517b028d3..42bd5420ac3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java +++ b/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java @@ -133,9 +133,13 @@ class VersionFieldUpgrader extends FilterCodecReader { final GrowableWriter versions = new GrowableWriter(2, reader.maxDoc(), PackedInts.COMPACT); PostingsEnum dpe = null; for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) { - dpe = uids.postings(reader.getLiveDocs(), dpe, PostingsEnum.PAYLOADS); + dpe = uids.postings(dpe, PostingsEnum.PAYLOADS); assert terms.hasPayloads() : "field has payloads"; + final Bits liveDocs = reader.getLiveDocs(); for (int doc = dpe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dpe.nextDoc()) { + if (liveDocs != null && liveDocs.get(doc) == false) { + continue; + } dpe.nextPosition(); final BytesRef payload = dpe.getPayload(); if (payload != null && payload.length == 8) { diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 2c413c48ecd..808f13ba23d 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.snapshots.blobstore; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchParseException; @@ -38,6 +37,7 @@ import org.elasticsearch.index.store.StoreFileMetaData; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -326,7 +326,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil private final long totalSize; - private final ImmutableList indexFiles; + private final List indexFiles; /** * Constructs new shard snapshot metadata from snapshot metadata @@ -345,7 +345,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil assert indexVersion >= 0; this.snapshot = snapshot; this.indexVersion = indexVersion; - this.indexFiles = ImmutableList.copyOf(indexFiles); + this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles)); this.startTime = startTime; this.time = time; this.numberOfFiles = numberOfFiles; @@ -358,7 +358,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil private BlobStoreIndexShardSnapshot() { this.snapshot = ""; this.indexVersion = 0; - this.indexFiles = ImmutableList.of(); + this.indexFiles = Collections.emptyList(); this.startTime = 0; this.time = 0; this.numberOfFiles = 0; @@ -523,7 +523,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil } } } - return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, ImmutableList.copyOf(indexFiles), + return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), startTime, time, numberOfFiles, totalSize); } } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java 
b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 0a22b1ec553..dbb66a782ab 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.snapshots.blobstore; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; @@ -33,6 +32,7 @@ import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.F import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -49,12 +49,12 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To public static final BlobStoreIndexShardSnapshots PROTO = new BlobStoreIndexShardSnapshots(); - private final ImmutableList shardSnapshots; + private final List shardSnapshots; private final ImmutableMap files; - private final ImmutableMap> physicalFiles; + private final ImmutableMap> physicalFiles; public BlobStoreIndexShardSnapshots(List shardSnapshots) { - this.shardSnapshots = ImmutableList.copyOf(shardSnapshots); + this.shardSnapshots = Collections.unmodifiableList(new ArrayList<>(shardSnapshots)); // Map between blob names and file info Map newFiles = newHashMap(); // Map between original physical names and file info @@ -78,15 +78,15 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To physicalFileList.add(newFiles.get(fileInfo.name())); } } - ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); + ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); for (Map.Entry> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue())); + mapBuilder.put(entry.getKey(), Collections.unmodifiableList(new ArrayList<>(entry.getValue()))); } this.physicalFiles = mapBuilder.build(); this.files = ImmutableMap.copyOf(newFiles); } - private BlobStoreIndexShardSnapshots(ImmutableMap files, ImmutableList shardSnapshots) { + private BlobStoreIndexShardSnapshots(ImmutableMap files, List shardSnapshots) { this.shardSnapshots = shardSnapshots; this.files = files; Map> physicalFiles = newHashMap(); @@ -100,15 +100,15 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To physicalFileList.add(files.get(fileInfo.name())); } } - ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); + ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); for (Map.Entry> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue())); + mapBuilder.put(entry.getKey(), Collections.unmodifiableList(new ArrayList<>(entry.getValue()))); } this.physicalFiles = mapBuilder.build(); } private BlobStoreIndexShardSnapshots() { - shardSnapshots = ImmutableList.of(); + shardSnapshots = Collections.emptyList(); files = ImmutableMap.of(); physicalFiles = ImmutableMap.of(); } @@ -289,17 +289,17 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To } ImmutableMap files = filesBuilder.build(); - ImmutableList.Builder snapshots = ImmutableList.builder(); + List snapshots = new ArrayList<>(); for (Map.Entry> entry : snapshotsMap.entrySet()) { - ImmutableList.Builder fileInfosBuilder = ImmutableList.builder(); + List fileInfosBuilder = new ArrayList<>(); for 
(String file : entry.getValue()) { FileInfo fileInfo = files.get(file); assert fileInfo != null; fileInfosBuilder.add(fileInfo); } - snapshots.add(new SnapshotFiles(entry.getKey(), fileInfosBuilder.build())); + snapshots.add(new SnapshotFiles(entry.getKey(), Collections.unmodifiableList(fileInfosBuilder))); } - return new BlobStoreIndexShardSnapshots(files, snapshots.build()); + return new BlobStoreIndexShardSnapshots(files, Collections.unmodifiableList(snapshots)); } } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java index aa265f17fbd..44d40c99f37 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.snapshots.blobstore; -import com.google.common.collect.ImmutableList; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import java.util.List; diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index ba301b4835f..dfd6cdf6b50 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.store; import com.google.common.collect.Sets; import org.apache.lucene.store.*; import org.apache.lucene.util.Constants; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; @@ -94,11 +93,11 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim } /* - * We are mmapping docvalues as well as term dictionaries, all other files are served through NIOFS + * We are mmapping norms, docvalues as well as term dictionaries, all other files are served through NIOFS * this provides good random access performance while not creating unnecessary mmaps for files like stored * fields etc. */ - private static final Set PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("dvd", "tim")); + private static final Set PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("nvd", "dvd", "tim")); protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 27367177b6f..39a0f5365ba 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.store; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import org.apache.lucene.codecs.CodecUtil; @@ -259,7 +258,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref metadataLock.writeLock().lock(); // we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't // get exceptions if files are still open. 
- try (Lock writeLock = Lucene.acquireWriteLock(directory())) { + try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (Map.Entry entry : entries) { String tempFile = entry.getKey(); String origFile = entry.getValue(); @@ -594,7 +593,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref */ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException { metadataLock.writeLock().lock(); - try (Lock writeLock = Lucene.acquireWriteLock(directory)) { + try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { final StoreDirectory dir = directory; for (String existingFile : dir.listAll()) { if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) { @@ -1005,9 +1004,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * NOTE: this diff will not contain the segments.gen file. This file is omitted on recovery. */ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { - final ImmutableList.Builder identical = ImmutableList.builder(); - final ImmutableList.Builder different = ImmutableList.builder(); - final ImmutableList.Builder missing = ImmutableList.builder(); + final List identical = new ArrayList<>(); + final List different = new ArrayList<>(); + final List missing = new ArrayList<>(); final Map> perSegment = new HashMap<>(); final List perCommitStoreFiles = new ArrayList<>(); @@ -1053,7 +1052,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref different.addAll(identicalFiles); } } - RecoveryDiff recoveryDiff = new RecoveryDiff(identical.build(), different.build(), missing.build()); + RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), Collections.unmodifiableList(different), Collections.unmodifiableList(missing)); assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 
1 : 0) : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]"; return recoveryDiff; diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 7aeafcf3ee7..cb07779164e 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -46,6 +46,7 @@ import org.apache.lucene.analysis.hu.HungarianAnalyzer; import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.apache.lucene.analysis.it.ItalianAnalyzer; +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.apache.lucene.analysis.no.NorwegianAnalyzer; @@ -378,6 +379,15 @@ public enum PreBuiltAnalyzers { } }, + LITHUANIAN { + @Override + protected Analyzer create(Version version) { + Analyzer a = new LithuanianAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + } + }, + NORWEGIAN { @Override protected Analyzer create(Version version) { diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java index 70c14b1295e..30cd6de1233 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; -import org.apache.lucene.util.Bits; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.ShardCoreKeyMap; @@ -253,9 +252,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { shardKeyMap.add(context.reader()); - return in.scorer(context, acceptDocs); + return in.scorer(context); } } diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 4e764654603..1bf020315c5 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -54,7 +54,6 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL public static final String FIELDDATA_CLEAN_INTERVAL_SETTING = "indices.fielddata.cache.cleanup_interval"; public static final String FIELDDATA_CACHE_CONCURRENCY_LEVEL = "indices.fielddata.cache.concurrency_level"; public static final String INDICES_FIELDDATA_CACHE_SIZE_KEY = "indices.fielddata.cache.size"; - public static final String INDICES_FIELDDATA_CACHE_EXPIRE_KEY = "indices.fielddata.cache.expire"; private final IndicesFieldDataCacheListener indicesFieldDataCacheListener; @@ -70,7 +69,6 @@ 
public class IndicesFieldDataCache extends AbstractComponent implements RemovalL this.indicesFieldDataCacheListener = indicesFieldDataCacheListener; final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1"); final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes(); - final TimeValue expire = settings.getAsTime(INDICES_FIELDDATA_CACHE_EXPIRE_KEY, null); CacheBuilder cacheBuilder = CacheBuilder.newBuilder() .removalListener(this); if (sizeInBytes > 0) { @@ -82,10 +80,8 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); } cacheBuilder.concurrencyLevel(concurrencyLevel); - if (expire != null && expire.millis() > 0) { - cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); - } - logger.debug("using size [{}] [{}], expire [{}]", size, new ByteSizeValue(sizeInBytes), expire); + + logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes)); cache = cacheBuilder.build(); this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1)); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index a0ef2eff76c..cc58305a49d 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.recovery; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.common.Nullable; @@ -34,6 +33,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -712,7 +713,7 @@ public class RecoveryState implements ToXContent, Streamable { private long targetThrottleTimeInNanos = UNKNOWN; public synchronized List fileDetails() { - return ImmutableList.copyOf(fileDetails.values()); + return Collections.unmodifiableList(new ArrayList<>(fileDetails.values())); } public synchronized void reset() { diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index 3eb2ed67ccd..350ced260c4 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -20,7 +20,6 @@ package org.elasticsearch.node.internal; import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.cluster.ClusterName; @@ -39,6 +38,7 @@ import java.io.InputStreamReader; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; @@ -52,7 +52,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class InternalSettingsPreparer { - static final List ALLOWED_SUFFIXES = ImmutableList.of(".yml", ".yaml", 
".json", ".properties"); + static final List ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties"); public static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; public static final String TEXT_PROMPT_VALUE = "${prompt.text}"; diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index d42b858ce7b..c54cce7d361 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; -import com.google.common.collect.ImmutableList; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; @@ -33,7 +32,10 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.action.percolate.PercolateShardRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.*; +import org.elasticsearch.common.HasContext; +import org.elasticsearch.common.HasContextAndHeaders; +import org.elasticsearch.common.HasHeaders; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.text.StringText; @@ -76,7 +78,11 @@ import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.scan.ScanContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; -import java.util.*; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentMap; /** @@ -122,7 +128,7 @@ public class PercolateContext extends SearchContext { public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard, IndexService indexService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService, Query aliasFilter, ParseFieldMatcher parseFieldMatcher) { - super(parseFieldMatcher); + super(parseFieldMatcher, request); this.indexShard = indexShard; this.indexService = indexService; this.fieldDataService = indexService.fieldData(); @@ -155,7 +161,7 @@ public class PercolateContext extends SearchContext { Map fields = new HashMap<>(); for (IndexableField field : parsedDocument.rootDoc().getFields()) { - fields.put(field.name(), new InternalSearchHitField(field.name(), ImmutableList.of())); + fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); } hitContext().reset( new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields), diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 6096f29262f..73ca113165a 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -19,6 +19,7 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.IntObjectHashMap; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.ExtendedMemoryIndex; @@ -40,6 +41,7 @@ import 
org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -63,9 +65,11 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; @@ -73,7 +77,10 @@ import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.percolator.QueryCollector.*; +import org.elasticsearch.percolator.QueryCollector.Count; +import org.elasticsearch.percolator.QueryCollector.Match; +import org.elasticsearch.percolator.QueryCollector.MatchAndScore; +import org.elasticsearch.percolator.QueryCollector.MatchAndSort; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchShardTarget; @@ -95,7 +102,9 @@ import java.util.Map; import static org.elasticsearch.common.util.CollectionUtils.eagerTransform; import static org.elasticsearch.index.mapper.SourceToParse.source; -import static org.elasticsearch.percolator.QueryCollector.*; +import static org.elasticsearch.percolator.QueryCollector.count; +import static org.elasticsearch.percolator.QueryCollector.match; +import static org.elasticsearch.percolator.QueryCollector.matchAndScore; public class PercolatorService extends AbstractComponent { @@ -162,9 +171,9 @@ public class PercolatorService extends AbstractComponent { } - public ReduceResult reduce(byte percolatorTypeId, List shardResults) { + public ReduceResult reduce(byte percolatorTypeId, List shardResults, HasContextAndHeaders headersContext) { PercolatorType percolatorType = percolatorTypes.get(percolatorTypeId); - return percolatorType.reduce(shardResults); + return percolatorType.reduce(shardResults, headersContext); } public PercolateShardResponse percolate(PercolateShardRequest request) { @@ -423,7 +432,7 @@ public class PercolatorService extends AbstractComponent { // 0x00 is reserved for empty type. 
byte id(); - ReduceResult reduce(List shardResults); + ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext); PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested); @@ -437,14 +446,14 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { long finalCount = 0; for (PercolateShardResponse shardResponse : shardResults) { finalCount += shardResponse.count(); } assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults); + InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); return new ReduceResult(finalCount, reducedAggregations); } @@ -481,8 +490,8 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { - return countPercolator.reduce(shardResults); + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { + return countPercolator.reduce(shardResults, headersContext); } @Override @@ -511,7 +520,7 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { long foundMatches = 0; int numMatches = 0; for (PercolateShardResponse response : shardResults) { @@ -537,7 +546,7 @@ public class PercolatorService extends AbstractComponent { } assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults); + InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations); } @@ -589,8 +598,8 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { - return matchPercolator.reduce(shardResults); + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { + return matchPercolator.reduce(shardResults, headersContext); } @Override @@ -622,8 +631,8 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { - return matchPercolator.reduce(shardResults); + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { + return matchPercolator.reduce(shardResults, headersContext); } @Override @@ -656,7 +665,7 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { long foundMatches = 0; int nonEmptyResponses = 0; int firstNonEmptyIndex = 0; @@ -735,7 +744,7 @@ public class PercolatorService extends AbstractComponent { } assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults); + InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations); } @@ -843,7 +852,7 @@ public class PercolatorService extends AbstractComponent { } } - private InternalAggregations reduceAggregations(List shardResults) { + private 
InternalAggregations reduceAggregations(List shardResults, HasContextAndHeaders headersContext) { if (shardResults.get(0).aggregations() == null) { return null; } @@ -852,14 +861,15 @@ public class PercolatorService extends AbstractComponent { for (PercolateShardResponse shardResult : shardResults) { aggregationsList.add(shardResult.aggregations()); } - InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService)); + InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService, + headersContext)); if (aggregations != null) { List pipelineAggregators = shardResults.get(0).pipelineAggregators(); if (pipelineAggregators != null) { List newAggs = new ArrayList<>(eagerTransform(aggregations.asList(), PipelineAggregator.AGGREGATION_TRANFORM_FUNCTION)); for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays, - scriptService)); + InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext( + bigArrays, scriptService, headersContext)); newAggs.add(newAgg); } aggregations = new InternalAggregations(newAggs); diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 84b640e4c1b..6172122d029 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -19,7 +19,6 @@ package org.elasticsearch.plugins; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.analysis.util.CharFilterFactory; @@ -328,7 +327,7 @@ public class PluginsService extends AbstractComponent { } private List> loadBundles(List bundles) { - ImmutableList.Builder> plugins = ImmutableList.builder(); + List> plugins = new ArrayList<>(); for (Bundle bundle : bundles) { // jar-hell check the bundle against the parent classloader @@ -363,7 +362,7 @@ public class PluginsService extends AbstractComponent { } } - return plugins.build(); + return Collections.unmodifiableList(plugins); } /** diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index fd712bff928..109202bc7d1 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.blobstore; -import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchParseException; @@ -330,13 +329,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent snapshotIds = snapshots(); if (snapshotIds.contains(snapshotId)) { - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (SnapshotId id : snapshotIds) { if (!snapshotId.equals(id)) { builder.add(id); } } - snapshotIds = builder.build(); + snapshotIds = Collections.unmodifiableList(builder); } writeSnapshotList(snapshotIds); // Now delete all indices @@ -377,7 +376,9 @@ public abstract class BlobStoreRepository 
extends AbstractLifecycleComponent snapshotIds = snapshots(); if (!snapshotIds.contains(snapshotId)) { - snapshotIds = ImmutableList.builder().addAll(snapshotIds).add(snapshotId).build(); + snapshotIds = new ArrayList<>(snapshotIds); + snapshotIds.add(snapshotId); + snapshotIds = Collections.unmodifiableList(snapshotIds); } writeSnapshotList(snapshotIds); return blobStoreSnapshot; @@ -416,7 +417,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent aliases, XContentBuilder builder, Params params) throws IOException { + private void writeAliases(List aliases, XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.ALIASES); if (aliases != null) { for (AliasMetaData alias : aliases) { @@ -144,7 +144,7 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.endObject(); } - private void writeWarmers(ImmutableList warmers, XContentBuilder builder, Params params) throws IOException { + private void writeWarmers(List warmers, XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.WARMERS); if (warmers != null) { for (IndexWarmersMetaData.Entry warmer : warmers) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java index be83ccbe4b5..67e01017678 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices.warmer.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -32,6 +31,8 @@ import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import java.util.List; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; @@ -68,7 +69,7 @@ public class RestGetWarmerAction extends BaseRestHandler { } builder.startObject(); - for (ObjectObjectCursor> entry : response.warmers()) { + for (ObjectObjectCursor> entry : response.warmers()) { builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 6dfe605d96b..af1f2f464a7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -50,6 +50,13 @@ public class RestMultiSearchAction extends BaseRestHandler { controller.registerHandler(GET, "/{index}/{type}/_msearch", this); controller.registerHandler(POST, "/{index}/{type}/_msearch", this); + controller.registerHandler(GET, "/_msearch/template", this); + controller.registerHandler(POST, "/_msearch/template", this); + 
controller.registerHandler(GET, "/{index}/_msearch/template", this); + controller.registerHandler(POST, "/{index}/_msearch/template", this); + controller.registerHandler(GET, "/{index}/{type}/_msearch/template", this); + controller.registerHandler(POST, "/{index}/{type}/_msearch/template", this); + this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true); } @@ -59,9 +66,15 @@ public class RestMultiSearchAction extends BaseRestHandler { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); String[] types = Strings.splitStringByCommaToArray(request.param("type")); + String path = request.path(); + boolean isTemplateRequest = isTemplateRequest(path); IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions()); - multiSearchRequest.add(RestActions.getRestContent(request), indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex); + multiSearchRequest.add(RestActions.getRestContent(request), isTemplateRequest, indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex); client.multiSearch(multiSearchRequest, new RestToXContentListener(channel)); } + + private boolean isTemplateRequest(String path) { + return (path != null && path.endsWith("/template")); + } } diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index c63cf47f4ae..3c356a4120d 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -25,6 +25,7 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; import com.google.common.collect.ImmutableMap; + import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; @@ -37,6 +38,7 @@ import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptRequest import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; @@ -114,21 +116,25 @@ public class ScriptService extends AbstractComponent implements Closeable { * @deprecated Use {@link org.elasticsearch.script.Script.ScriptField} instead. This should be removed in * 2.0 */ + @Deprecated public static final ParseField SCRIPT_LANG = new ParseField("lang","script_lang"); /** * @deprecated Use {@link ScriptType#getParseField()} instead. This should * be removed in 2.0 */ + @Deprecated public static final ParseField SCRIPT_FILE = new ParseField("script_file"); /** * @deprecated Use {@link ScriptType#getParseField()} instead. This should * be removed in 2.0 */ + @Deprecated public static final ParseField SCRIPT_ID = new ParseField("script_id"); /** * @deprecated Use {@link ScriptType#getParseField()} instead. 
This should * be removed in 2.0 */ + @Deprecated public static final ParseField SCRIPT_INLINE = new ParseField("script"); @Inject @@ -220,7 +226,7 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(Script script, ScriptContext scriptContext) { + public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -248,14 +254,14 @@ public class ScriptService extends AbstractComponent implements Closeable { " operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are not supported"); } - return compileInternal(script); + return compileInternal(script, headersContext); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, * without checking if it can be executed based on settings. */ - public CompiledScript compileInternal(Script script) { + public CompiledScript compileInternal(Script script, HasContextAndHeaders context) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -292,7 +298,7 @@ public class ScriptService extends AbstractComponent implements Closeable { //the script has been updated in the index since the last look up. final IndexedScript indexedScript = new IndexedScript(lang, name); name = indexedScript.id; - code = getScriptFromIndex(indexedScript.lang, indexedScript.id); + code = getScriptFromIndex(indexedScript.lang, indexedScript.id, context); } String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? 
null : name, code); @@ -333,13 +339,13 @@ public class ScriptService extends AbstractComponent implements Closeable { return scriptLang; } - String getScriptFromIndex(String scriptLang, String id) { + String getScriptFromIndex(String scriptLang, String id, HasContextAndHeaders context) { if (client == null) { throw new IllegalArgumentException("Got an indexed script with no Client registered."); } scriptLang = validateScriptLanguage(scriptLang); GetRequest getRequest = new GetRequest(SCRIPT_INDEX, scriptLang, id); - getRequest.copyContextAndHeadersFrom(SearchContext.current()); + getRequest.copyContextAndHeadersFrom(context); GetResponse responseFields = client.get(getRequest).actionGet(); if (responseFields.isExists()) { return getScriptFromResponse(responseFields); @@ -432,8 +438,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(Script script, ScriptContext scriptContext) { - return executable(compile(script, scriptContext), script.getParams()); + public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { + return executable(compile(script, scriptContext, headersContext), script.getParams()); } /** @@ -447,7 +453,7 @@ public class ScriptService extends AbstractComponent implements Closeable { * Compiles (or retrieves from cache) and executes the provided search script */ public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { - CompiledScript compiledScript = compile(script, scriptContext); + CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current()); return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 15eb3c0e8dc..4288e1d098a 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.ImmutableMap; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -82,10 +83,23 @@ import org.elasticsearch.script.Template; import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.*; -import org.elasticsearch.search.internal.*; +import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.DefaultSearchContext; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; -import org.elasticsearch.search.query.*; +import 
org.elasticsearch.search.internal.ShardSearchLocalRequest; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; @@ -736,7 +750,7 @@ public class SearchService extends AbstractLifecycleComponent { BytesReference processedQuery; if (request.template() != null) { - ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); } else { if (!hasLength(request.templateSource())) { @@ -753,7 +767,7 @@ public class SearchService extends AbstractLifecycleComponent { //Try to double parse for nested template id/file parser = null; try { - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); parser = XContentFactory.xContent(processedQuery).createParser(processedQuery); } catch (ElasticsearchParseException epe) { @@ -761,7 +775,7 @@ public class SearchService extends AbstractLifecycleComponent { //for backwards compatibility and keep going template = new Template(template.getScript(), ScriptService.ScriptType.FILE, MustacheScriptEngineService.NAME, null, template.getParams()); - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); } if (parser != null) { @@ -771,7 +785,8 @@ public class SearchService extends AbstractLifecycleComponent { //An inner template referring to a filename or id template = new Template(innerTemplate.getScript(), innerTemplate.getType(), MustacheScriptEngineService.NAME, null, template.getParams()); - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, + searchContext); processedQuery = (BytesReference) executable.run(); } } catch (ScriptParseException e) { @@ -779,7 +794,7 @@ public class SearchService extends AbstractLifecycleComponent { } } } else { - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); } } catch (IOException e) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 90c1dee9e50..1c67a941daf 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,7 +18,8 @@ */ 
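The SearchService hunk above shows the pattern this patch threads through the rest of the tree: rather than having ScriptService reach for SearchContext.current(), each caller hands its own HasContextAndHeaders down, so the GET that resolves an indexed script inherits the context and headers of the originating request. A minimal caller-side sketch, assuming `scriptService` and `searchContext` are wired in by the caller and the indexed template id "my_template" is hypothetical:

----------------------------------------------------------------------
import java.util.Collections;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import org.elasticsearch.search.internal.SearchContext;

class TemplateRenderSketch {
    static BytesReference render(ScriptService scriptService, SearchContext searchContext) {
        Script template = new Script("my_template", ScriptService.ScriptType.INDEXED,
                MustacheScriptEngineService.NAME, Collections.<String, Object>emptyMap());
        // the search context doubles as the HasContextAndHeaders argument, so the
        // lookup of the indexed script carries the request's context and headers
        ExecutableScript executable =
                scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext);
        return (BytesReference) executable.run();
    }
}
----------------------------------------------------------------------

The same motivation explains the ReduceContext change just below: reduce phases run outside any SearchContext, so ReduceContext now extends DelegatingHasContextAndHeaders and can stand in as the headers source itself.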
package org.elasticsearch.search.aggregations; -import com.google.common.collect.ImmutableList; +import org.elasticsearch.common.DelegatingHasContextAndHeaders; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -35,6 +36,7 @@ import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -90,12 +92,13 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St } } - public static class ReduceContext { + public static class ReduceContext extends DelegatingHasContextAndHeaders { private final BigArrays bigArrays; private ScriptService scriptService; - public ReduceContext(BigArrays bigArrays, ScriptService scriptService) { + public ReduceContext(BigArrays bigArrays, ScriptService scriptService, HasContextAndHeaders headersContext) { + super(headersContext); this.bigArrays = bigArrays; this.scriptService = scriptService; } @@ -225,7 +228,7 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St metaData = in.readMap(); int size = in.readVInt(); if (size == 0) { - pipelineAggregators = ImmutableList.of(); + pipelineAggregators = Collections.emptyList(); } else { pipelineAggregators = new ArrayList<>(size); for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index e90547eddf4..bb5f074b179 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations; import com.google.common.base.Function; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import com.google.common.collect.Maps; @@ -35,6 +34,7 @@ import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -56,7 +56,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl } }; - private List aggregations = ImmutableList.of(); + private List aggregations = Collections.emptyList(); private Map aggregationsAsMap; @@ -211,7 +211,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl public void readFrom(StreamInput in) throws IOException { int size = in.readVInt(); if (size == 0) { - aggregations = ImmutableList.of(); + aggregations = Collections.emptyList(); aggregationsAsMap = ImmutableMap.of(); } else { aggregations = new ArrayList<>(size); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index beefbc64508..ba776e33d35 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -109,9 +109,9 
@@ public class ParentToChildrenAggregator extends SingleBucketAggregator { final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert globalOrdinals != null; - Scorer parentScorer = parentFilter.scorer(ctx, null); + Scorer parentScorer = parentFilter.scorer(ctx); final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); - if (childFilter.scorer(ctx, null) != null) { + if (childFilter.scorer(ctx) != null) { replay.add(ctx); } return new LeafBucketCollector() { @@ -146,7 +146,7 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { this.replay = null; for (LeafReaderContext ctx : replay) { - DocIdSetIterator childDocsIter = childFilter.scorer(ctx, ctx.reader().getLiveDocs()); + DocIdSetIterator childDocsIter = childFilter.scorer(ctx); if (childDocsIter == null) { continue; } @@ -157,7 +157,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { // Set the scorer, since we now replay only the child docIds sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f)); + final Bits liveDocs = ctx.reader().getLiveDocs(); for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) { + if (liveDocs != null && liveDocs.get(docId) == false) { + continue; + } long globalOrdinal = globalOrdinals.getOrd(docId); if (globalOrdinal != -1) { long bucketOrd = parentOrdToBuckets.get(globalOrdinal); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index 0f904e4da03..b1308444894 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -58,7 +58,7 @@ public class FilterAggregator extends SingleBucketAggregator { public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { // no need to provide deleted docs to the filter - final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null)); + final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx)); return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index 781d47f68eb..3cd67f835ec 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -91,7 +91,7 @@ public class FiltersAggregator extends BucketsAggregator { // no need to provide deleted docs to the filter final Bits[] bits = new Bits[filters.length]; for (int i = 0; i < filters.length; ++i) { - bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx, null)); + bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx)); } return new LeafBucketCollectorBase(sub, null) { @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java index ba37c162245..9768a8617da 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java @@ -60,11 +60,11 @@ public class SignificantTermsParametersParser extends AbstractTermsParametersPar @Override public void parseSpecial(String aggregationName, XContentParser parser, SearchContext context, XContentParser.Token token, String currentFieldName) throws IOException { - + if (token == XContentParser.Token.START_OBJECT) { SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserMapper.get(currentFieldName); if (significanceHeuristicParser != null) { - significanceHeuristic = significanceHeuristicParser.parse(parser, context.parseFieldMatcher()); + significanceHeuristic = significanceHeuristicParser.parse(parser, context.parseFieldMatcher(), context); } else if (context.parseFieldMatcher().match(currentFieldName, BACKGROUND_FILTER)) { filter = context.queryParserService().parseInnerFilter(parser).query(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java index f85bd80cc4d..99ee7c73b2b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -115,7 +116,8 @@ public class GND extends NXYSignificanceHeuristic { } @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryShardException { String givenName = parser.currentName(); boolean backgroundIsSuperset = true; XContentParser.Token token = parser.nextToken(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java index 5c9794a115a..97264e7d53f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -108,7 +109,8 @@ public class JLHScore extends SignificanceHeuristic { public static class JLHScoreParser implements SignificanceHeuristicParser { @Override - public SignificanceHeuristic 
parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryShardException { // move to the closing bracket if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) { throw new ElasticsearchParseException("failed to parse [jhl] significance heuristic. expected an empty object, but found [{}] instead", parser.currentToken()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java index d21b319916a..c6a6924108c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -138,7 +139,8 @@ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic { public static abstract class NXYParser implements SignificanceHeuristicParser { @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryShardException { String givenName = parser.currentName(); boolean includeNegatives = false; boolean backgroundIsSuperset = true; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java index 25556c9d004..aceae8c251b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -57,7 +58,7 @@ public class PercentageScore extends SignificanceHeuristic { /** * Indicates the significance of a term in a sample by determining what percentage - * of all occurrences of a term are found in the sample. + * of all occurrences of a term are found in the sample. 
*/ @Override public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { @@ -65,7 +66,7 @@ public class PercentageScore extends SignificanceHeuristic { if (supersetFreq == 0) { // avoid a divide by zero issue return 0; - } + } return (double) subsetFreq / (double) supersetFreq; } @@ -77,7 +78,8 @@ public class PercentageScore extends SignificanceHeuristic { public static class PercentageScoreParser implements SignificanceHeuristicParser { @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryShardException { // move to the closing bracket if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) { throw new ElasticsearchParseException("failed to parse [percentage] significance heuristic. expected an empty object, but got [{}] instead", parser.currentToken()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index d3a4e64ce1b..d1179693a7f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -24,7 +24,6 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -35,6 +34,7 @@ import org.elasticsearch.script.*; import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -81,8 +81,9 @@ public class ScriptHeuristic extends SignificanceHeuristic { } + @Override public void initialize(InternalAggregation.ReduceContext context) { - searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS); + searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context); searchScript.setNextVar("_subset_freq", subsetDfHolder); searchScript.setNextVar("_subset_size", subsetSizeHolder); searchScript.setNextVar("_superset_freq", supersetDfHolder); @@ -129,7 +130,8 @@ public class ScriptHeuristic extends SignificanceHeuristic { } @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryShardException { String heuristicName = parser.currentName(); Script script = null; XContentParser.Token token; @@ -169,7 +171,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } ExecutableScript searchScript; try { - searchScript = scriptService.executable(script, 
ScriptContext.Standard.AGGS); + searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context); } catch (Exception e) { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. the script [{}] could not be loaded", e, script, heuristicName); } @@ -204,21 +206,23 @@ public class ScriptHeuristic extends SignificanceHeuristic { public final class LongAccessor extends Number { public long value; + @Override public int intValue() { return (int)value; } + @Override public long longValue() { return value; } @Override public float floatValue() { - return (float)value; + return value; } @Override public double doubleValue() { - return (double)value; + return value; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java index a6489220f51..cd6f7802dab 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java @@ -23,12 +23,14 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; public interface SignificanceHeuristicParser { - SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException; + SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) throws IOException, + QueryParsingException; String[] getNames(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index b21bd5224c3..6eb5153e59d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -62,6 +62,7 @@ public final class HyperLogLogPlusPlus implements Releasable { private static final boolean HYPERLOGLOG = true; private static final float MAX_LOAD_FACTOR = 0.75f; private static final int P2 = 25; + private static final int BIAS_K = 6; /** * Compute the required precision so that count distinct entries @@ -374,23 +375,30 @@ public final class HyperLogLogPlusPlus implements Releasable { private double estimateBias(double e) { final double[] rawEstimateData = rawEstimateData(); final double[] biasData = biasData(); - int index = Arrays.binarySearch(rawEstimateData, e); - if (index >= 0) { - return biasData[index]; - } else { - index = -1 - index; - if (index == 0) { - return biasData[0]; - } else if (index >= biasData.length) { - return biasData[biasData.length - 1]; - } else { - double w1 = (e - rawEstimateData[index - 1]); - assert w1 >= 0; - double w2 = (rawEstimateData[index] - e); - assert w2 >= 0; - return (biasData[index - 1] * w1 + biasData[index] * w2) / (w1 + w2); + + final double[] weights = new double[BIAS_K]; + int index = biasData.length - BIAS_K; + 
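// rawEstimateData is sorted, so the distance |rawEstimateData[i] - e| shrinks
// while the scan approaches e and grows again once past it; the circular
// weights buffer keeps the BIAS_K most recent inverse distances. When the
// entry written BIAS_K steps earlier already outweighs the current candidate,
// the window just behind the scan holds the (roughly) k nearest raw estimates,
// and index marks where that window starts in biasData.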
for (int i = 0; i < rawEstimateData.length; ++i) { + final double w = 1.0 / Math.abs(rawEstimateData[i] - e); + final int j = i % weights.length; + if (Double.isInfinite(w)) { + return biasData[i]; + } else if (weights[j] >= w) { + index = i - BIAS_K; + break; } + weights[j] = w; } + + double weightSum = 0.0; + double biasSum = 0.0; + for (int i = 0, j = index; i < BIAS_K; ++i, ++j) { + final double w = weights[j % BIAS_K]; /* weights is filled circularly by absolute iteration index, so realign on j; reading weights[i] would pair biases with the wrong neighbours */ + final double b = biasData[j]; + biasSum += w * b; + weightSum += w; + } + return biasSum / weightSum; } private double[] biasData() { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index f4d726aa49f..d39a0335ac3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -91,7 +91,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement vars.putAll(firstAggregation.reduceScript.getParams()); } CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript, - ScriptContext.Standard.AGGS); + ScriptContext.Standard.AGGS, reduceContext); ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars); aggregation = script.run(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index a24dd4d3a7e..2c1caaa5241 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -58,11 +58,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator { this.params = params; ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { - scriptService.executable(initScript, ScriptContext.Standard.AGGS).run(); + scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run(); } this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS); if (combineScript != null) { - this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS); + this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext()); } else { this.combineScript = null; } @@ -159,7 +159,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator { return null; } } - + @SuppressWarnings({ "unchecked" }) private static T deepCopyParams(T original, SearchContext context) { T clone; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 06559645821..ff324d849c1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -104,7 +104,7 @@ public class BucketScriptPipelineAggregator
extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java index 154a729d046..9e7654fd8e6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java @@ -98,7 +98,7 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 90ce07b87ff..7778ac4963e 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.builder; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.ToXContentToBytes; @@ -540,7 +539,7 @@ public class SearchSourceBuilder extends ToXContentToBytes { * per field. 
*/ public SearchSourceBuilder noFields() { - this.fieldNames = ImmutableList.of(); + this.fieldNames = Collections.emptyList(); return this; } diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index 74e263220e4..5836611e2ba 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -296,7 +297,8 @@ public class SearchPhaseController extends AbstractComponent { } } - public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray queryResultsArr, AtomicArray fetchResultsArr) { + public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray queryResultsArr, + AtomicArray fetchResultsArr, HasContextAndHeaders headersContext) { List> queryResults = queryResultsArr.asList(); List> fetchResults = fetchResultsArr.asList(); @@ -404,7 +406,7 @@ public class SearchPhaseController extends AbstractComponent { for (AtomicArray.Entry entry : queryResults) { aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations()); } - aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService)); + aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService, headersContext)); } } @@ -413,8 +415,8 @@ public class SearchPhaseController extends AbstractComponent { if (pipelineAggregators != null) { List newAggs = new ArrayList<>(eagerTransform(aggregations.asList(), PipelineAggregator.AGGREGATION_TRANFORM_FUNCTION)); for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays, - scriptService)); + InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext( + bigArrays, scriptService, headersContext)); newAggs.add(newAgg); } aggregations = new InternalAggregations(newAggs); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index f9156866200..12bb85b7697 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fetch; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; @@ -59,6 +58,7 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -285,7 +285,7 @@ public class FetchPhase implements SearchPhase { nestedParsedSource = (List>) extractedValue; } else if (extractedValue instanceof Map) { // nested field has an 
object value in the _source. This just means the nested field has just one inner object, which is valid, but uncommon. - nestedParsedSource = ImmutableList.of((Map < String, Object >) extractedValue); + nestedParsedSource = Collections.singletonList((Map) extractedValue); } else { throw new IllegalStateException("extracted source isn't an object or an array"); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 75b4b4f912d..460346c44c0 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -190,7 +190,7 @@ public final class InnerHitsContext { public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { return new ConstantScoreWeight(this) { @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { // Nested docs only reside in a single segment, so no need to evaluate all segments if (!context.reader().getCoreCacheKey().equals(leafReader.getCoreCacheKey())) { return null; @@ -209,7 +209,7 @@ public final class InnerHitsContext { return null; } - final DocIdSet children = childFilter.getDocIdSet(context, acceptDocs); + final DocIdSet children = childFilter.getDocIdSet(context, null); if (children == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java index 75e62d63f9d..87965321af4 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java @@ -85,7 +85,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase { Query filter = entry.getValue(); final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false); - final Scorer scorer = weight.scorer(hitContext.readerContext(), null); + final Scorer scorer = weight.scorer(hitContext.readerContext()); if (scorer == null) { continue; } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java index 8f12dd0f9b4..a2d762461c0 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.highlight; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; @@ -87,7 +86,7 @@ public final class CustomQueryScorer extends QueryScorer { } else if (query instanceof FilteredQuery) { query = ((FilteredQuery) query).getQuery(); extract(query, terms); - } else if (query instanceof BlendedTermQuery) { + } else { extractWeightedTerms(terms, query); } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java index 5b9ab72641a..07e931c722a 100644 
--- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.highlight; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.Query; import org.elasticsearch.common.component.AbstractComponent; @@ -34,7 +33,10 @@ import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.SearchContext; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map; import static com.google.common.collect.Maps.newHashMap; @@ -44,7 +46,7 @@ import static com.google.common.collect.Maps.newHashMap; */ public class HighlightPhase extends AbstractComponent implements FetchSubPhase { - private static final ImmutableList STANDARD_HIGHLIGHTERS_BY_PRECEDENCE = ImmutableList.of("fvh", "postings", "plain"); + private static final List STANDARD_HIGHLIGHTERS_BY_PRECEDENCE = Arrays.asList("fvh", "postings", "plain"); private final Highlighters highlighters; @@ -82,7 +84,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase { DocumentMapper documentMapper = context.mapperService().documentMapper(hitContext.hit().type()); fieldNamesToHighlight = documentMapper.mappers().simpleMatchToFullName(field.field()); } else { - fieldNamesToHighlight = ImmutableList.of(field.field()); + fieldNamesToHighlight = Collections.singletonList(field.field()); } if (context.highlight().forceSource(field)) { diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java index 3358aec0ed7..b26be721d93 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.highlight; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.search.highlight.Encoder; @@ -30,6 +29,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; +import java.util.Collections; import java.util.List; public final class HighlightUtils { @@ -52,7 +52,7 @@ public final class HighlightUtils { textsToHighlight = fieldVisitor.fields().get(mapper.fieldType().names().indexName()); if (textsToHighlight == null) { // Can happen if the document doesn't have the field to highlight - textsToHighlight = ImmutableList.of(); + textsToHighlight = Collections.emptyList(); } } else { SourceLookup sourceLookup = searchContext.lookup().source(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index fdaac5e96aa..2d24d26bae4 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -19,8 +19,6 @@ package org.elasticsearch.search.internal; -import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; -import com.google.common.collect.ImmutableList; import 
org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; @@ -31,12 +29,8 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.HasContext; -import org.elasticsearch.common.HasContextAndHeaders; -import org.elasticsearch.common.HasHeaders; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.function.BoostScoreFunction; @@ -79,7 +73,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; /** * @@ -147,7 +140,7 @@ public class DefaultSearchContext extends SearchContext { BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout ) { - super(parseFieldMatcher); + super(parseFieldMatcher, request); this.id = id; this.request = request; this.searchType = request.searchType(); @@ -586,7 +579,7 @@ public class DefaultSearchContext extends SearchContext { @Override public void emptyFieldNames() { - this.fieldNames = ImmutableList.of(); + this.fieldNames = Collections.emptyList(); } @Override @@ -725,81 +718,6 @@ public class DefaultSearchContext extends SearchContext { return innerHitsContext; } - @Override - public V putInContext(Object key, Object value) { - return request.putInContext(key, value); - } - - @Override - public void putAllInContext(ObjectObjectAssociativeContainer map) { - request.putAllInContext(map); - } - - @Override - public V getFromContext(Object key) { - return request.getFromContext(key); - } - - @Override - public V getFromContext(Object key, V defaultValue) { - return request.getFromContext(key, defaultValue); - } - - @Override - public boolean hasInContext(Object key) { - return request.hasInContext(key); - } - - @Override - public int contextSize() { - return request.contextSize(); - } - - @Override - public boolean isContextEmpty() { - return request.isContextEmpty(); - } - - @Override - public ImmutableOpenMap getContext() { - return request.getContext(); - } - - @Override - public void copyContextFrom(HasContext other) { - request.copyContextFrom(other); - } - - @Override - public void putHeader(String key, V value) { - request.putHeader(key, value); - } - - @Override - public V getHeader(String key) { - return request.getHeader(key); - } - - @Override - public boolean hasHeader(String key) { - return request.hasHeader(key); - } - - @Override - public Set getHeaders() { - return request.getHeaders(); - } - - @Override - public void copyHeadersFrom(HasHeaders from) { - request.copyHeadersFrom(from); - } - - @Override - public void copyContextAndHeadersFrom(HasContextAndHeaders other) { - request.copyContextAndHeadersFrom(other); - } - @Override public Map, Collector> queryCollectors() { return queryCollectors; diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 2f79d03234e..a4b9c4d852b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ 
b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -19,16 +19,13 @@ package org.elasticsearch.search.internal; -import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; - import org.apache.lucene.search.Collector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.*; -import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; @@ -59,7 +56,6 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.util.List; import java.util.Map; -import java.util.Set; public abstract class FilteredSearchContext extends SearchContext { @@ -67,7 +63,7 @@ public abstract class FilteredSearchContext extends SearchContext { public FilteredSearchContext(SearchContext in) { //inner_hits in percolator ends up with null inner search context - super(in == null ? ParseFieldMatcher.EMPTY : in.parseFieldMatcher()); + super(in == null ? ParseFieldMatcher.EMPTY : in.parseFieldMatcher(), in); this.in = in; } @@ -526,81 +522,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.timeEstimateCounter(); } - @Override - public V putInContext(Object key, Object value) { - return in.putInContext(key, value); - } - - @Override - public void putAllInContext(ObjectObjectAssociativeContainer map) { - in.putAllInContext(map); - } - - @Override - public V getFromContext(Object key) { - return in.getFromContext(key); - } - - @Override - public V getFromContext(Object key, V defaultValue) { - return in.getFromContext(key, defaultValue); - } - - @Override - public boolean hasInContext(Object key) { - return in.hasInContext(key); - } - - @Override - public int contextSize() { - return in.contextSize(); - } - - @Override - public boolean isContextEmpty() { - return in.isContextEmpty(); - } - - @Override - public ImmutableOpenMap getContext() { - return in.getContext(); - } - - @Override - public void copyContextFrom(HasContext other) { - in.copyContextFrom(other); - } - - @Override - public void putHeader(String key, V value) { - in.putHeader(key, value); - } - - @Override - public V getHeader(String key) { - return in.getHeader(key); - } - - @Override - public boolean hasHeader(String key) { - return in.hasHeader(key); - } - - @Override - public Set getHeaders() { - return in.getHeaders(); - } - - @Override - public void copyHeadersFrom(HasHeaders from) { - in.copyHeadersFrom(from); - } - - @Override - public void copyContextAndHeadersFrom(HasContextAndHeaders other) { - in.copyContextAndHeadersFrom(other); - } - @Override public SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory contextFactory) { return in.getFetchSubPhaseContext(contextFactory); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 3c958689b05..19cbf938fdf 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -21,12 +21,14 @@ package org.elasticsearch.search.internal; import com.google.common.collect.Iterables; import 
com.google.common.collect.Multimap; import com.google.common.collect.MultimapBuilder; + import org.apache.lucene.search.Collector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.common.DelegatingHasContextAndHeaders; import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -67,7 +69,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; -public abstract class SearchContext implements Releasable, HasContextAndHeaders { +public abstract class SearchContext extends DelegatingHasContextAndHeaders implements Releasable { private static ThreadLocal current = new ThreadLocal<>(); public final static int DEFAULT_TERMINATE_AFTER = 0; @@ -91,7 +93,8 @@ public abstract class SearchContext implements Releasable, HasContextAndHeaders protected final ParseFieldMatcher parseFieldMatcher; - protected SearchContext(ParseFieldMatcher parseFieldMatcher) { + protected SearchContext(ParseFieldMatcher parseFieldMatcher, HasContextAndHeaders contextHeaders) { + super(contextHeaders); this.parseFieldMatcher = parseFieldMatcher; } diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 4596375d119..670f6788d4e 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.internal; -import com.google.common.collect.ImmutableList; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; @@ -36,6 +35,7 @@ import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -240,7 +240,7 @@ public class SubSearchContext extends FilteredSearchContext { @Override public void emptyFieldNames() { - this.fieldNames = ImmutableList.of(); + this.fieldNames = Collections.emptyList(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java b/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java index 43c28d05304..09c78d250fb 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java @@ -19,12 +19,19 @@ package org.elasticsearch.search.lookup; -import org.apache.lucene.index.*; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FilterLeafReader.FilterPostingsEnum; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lucene.search.EmptyScorer; import java.io.IOException; import 
java.util.Iterator; @@ -144,7 +151,28 @@ public class IndexFieldTerm implements Iterable { if (terms != null) { TermsEnum termsEnum = terms.iterator(); if (termsEnum.seekExact(identifier.bytes())) { - newPostings = termsEnum.postings(reader.getLiveDocs(), postings, luceneFlags); + newPostings = termsEnum.postings(postings, luceneFlags); + final Bits liveDocs = reader.getLiveDocs(); + if (liveDocs != null) { + newPostings = new FilterPostingsEnum(newPostings) { + private int doNext(int d) throws IOException { + for (; d != NO_MORE_DOCS; d = super.nextDoc()) { + if (liveDocs.get(d)) { + return d; + } + } + return NO_MORE_DOCS; + } + @Override + public int nextDoc() throws IOException { + return doNext(super.nextDoc()); + } + @Override + public int advance(int target) throws IOException { + return doNext(super.advance(target)); + } + }; + } } } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java index 98e450d1265..040ee4e85ea 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java @@ -18,13 +18,15 @@ */ package org.elasticsearch.search.suggest; -import java.io.IOException; - +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.IndexQueryParserService; +import java.io.IOException; + public interface SuggestContextParser { - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException; + public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, HasContextAndHeaders headersContext) throws IOException; } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java index 637ed3d6c48..d2c2bc5d937 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -45,11 +46,13 @@ public final class SuggestParseElement implements SearchParseElement { @Override public void parse(XContentParser parser, SearchContext context) throws Exception { - SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.queryParserService(), context.shardTarget().index(), context.shardTarget().shardId()); + SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.queryParserService(), + context.shardTarget().index(), context.shardTarget().shardId(), context); context.suggest(suggestionSearchContext); } - public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService, String index, int shardId) throws IOException { + public 
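The IndexFieldTerm hunk above tracks Lucene 5.3's removal of the liveDocs argument from TermsEnum.postings(): deletions are no longer filtered for you, so the patch re-applies them by wrapping the enum in a FilterPostingsEnum whose doNext() skips dead documents. The same skip-loop as a standalone sketch over Lucene's generic DocIdSetIterator, assuming a 5.x classpath:

---------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;

// Skips any doc ID for which liveDocs.get(doc) is false.
final class LiveDocsFilteredIterator extends DocIdSetIterator {
    private final DocIdSetIterator in;
    private final Bits liveDocs;

    LiveDocsFilteredIterator(DocIdSetIterator in, Bits liveDocs) {
        this.in = in;
        this.liveDocs = liveDocs;
    }

    private int doNext(int doc) throws IOException {
        while (doc != NO_MORE_DOCS && liveDocs.get(doc) == false) {
            doc = in.nextDoc();
        }
        return doc;
    }

    @Override
    public int docID() {
        return in.docID();
    }

    @Override
    public int nextDoc() throws IOException {
        return doNext(in.nextDoc());
    }

    @Override
    public int advance(int target) throws IOException {
        return doNext(in.advance(target));
    }

    @Override
    public long cost() {
        return in.cost();
    }
}
---------------------------------------------------------------------------

Note that advance(target) can legitimately land on a deleted document, which is why both entry points funnel through the same doNext() loop, just as in the patch.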
SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, String index, int shardId, HasContextAndHeaders headersContext) throws IOException { SuggestionSearchContext suggestionSearchContext = new SuggestionSearchContext(); BytesRef globalText = null; @@ -88,7 +91,7 @@ public final class SuggestParseElement implements SearchParseElement { throw new IllegalArgumentException("Suggester[" + fieldName + "] not supported"); } final SuggestContextParser contextParser = suggesters.get(fieldName).getContextParser(); - suggestionContext = contextParser.parse(parser, mapperService, queryParserService); + suggestionContext = contextParser.parse(parser, mapperService, queryParserService, headersContext); } } if (suggestionContext != null) { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java index 4ee79025adf..c5b1b5931e9 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.suggest.completion; import com.carrotsearch.hppc.ObjectLongHashMap; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.index.PostingsEnum; @@ -40,6 +41,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.LimitedFiniteStringsIterator; import org.apache.lucene.util.fst.ByteSequenceOutputs; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.PairOutputs; @@ -56,6 +58,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.TreeMap; @@ -156,7 +159,7 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider if (term == null) { break; } - docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS); builder.startTerm(term); int docFreq = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { @@ -397,6 +400,8 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream); + return prototype.toFiniteStrings(stream); } + + } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java index 9802db38130..8470633fdc5 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.suggest.completion; +import org.elasticsearch.common.HasContextAndHeaders; 
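AnalyzingCompletionLookupProvider above moves to the one-argument prototype.toFiniteStrings(stream), and the new TokenStreamToAutomaton and LimitedFiniteStringsIterator imports point at the Lucene 5.3 machinery underneath: the token stream is first converted to an automaton, whose accepted strings become the suggester inputs. A small demo of that first step; it assumes, as the Lucene 5.x implementation does internally, that toAutomaton() drives reset()/incrementToken()/end() on the stream itself:

---------------------------------------------------------------------------
import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.TokenStreamToAutomaton;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.automaton.Automaton;

public class TokenStreamAutomatonDemo {
    public static void main(String[] args) throws IOException {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
                TokenStream stream = analyzer.tokenStream("field", new StringReader("nested automaton demo"))) {
            // toAutomaton() consumes the stream; no manual reset() here.
            Automaton automaton = new TokenStreamToAutomaton().toAutomaton(stream);
            System.out.println("states: " + automaton.getNumStates());
        }
    }
}
---------------------------------------------------------------------------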
import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.Fuzziness; @@ -49,13 +50,14 @@ public class CompletionSuggestParser implements SuggestContextParser { } @Override - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException { + public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, HasContextAndHeaders headersContext) throws IOException { XContentParser.Token token; String fieldName = null; CompletionSuggestionContext suggestion = new CompletionSuggestionContext(completionSuggester); - + XContentParser contextParser = null; - + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); @@ -90,7 +92,7 @@ public class CompletionSuggestParser implements SuggestContextParser { // Copy the current structure. We will parse, once the mapping is provided XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); builder.copyCurrentStructure(parser); - BytesReference bytes = builder.bytes(); + BytesReference bytes = builder.bytes(); contextParser = parser.contentType().xContent().createParser(bytes); } else { throw new IllegalArgumentException("suggester [completion] doesn't support field [" + fieldName + "]"); @@ -99,7 +101,7 @@ public class CompletionSuggestParser implements SuggestContextParser { throw new IllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]"); } } - + suggestion.fieldType((CompletionFieldMapper.CompletionFieldType) mapperService.smartNameFieldType(suggestion.getField())); CompletionFieldMapper.CompletionFieldType fieldType = suggestion.fieldType(); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java index 103fd0dcf0a..ebcf0456f87 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java @@ -135,11 +135,6 @@ public final class CompletionTokenStream extends TokenStream { private final BytesRefBuilder bytes = new BytesRefBuilder(); private CharsRefBuilder charsRef; - @Override - public void fillBytesRef() { - // does nothing - we change in place - } - @Override public BytesRefBuilder builder() { return bytes; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 0f6a8096973..13149e20e4d 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; @@ -48,12 +49,13 @@ public final class 
PhraseSuggestParser implements SuggestContextParser { } @Override - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException { + public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, HasContextAndHeaders headersContext) throws IOException { PhraseSuggestionContext suggestion = new PhraseSuggestionContext(suggester); suggestion.setQueryParserService(queryParserService); XContentParser.Token token; String fieldName = null; - boolean gramSizeSet = false; + boolean gramSizeSet = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); @@ -140,7 +142,8 @@ public final class PhraseSuggestParser implements SuggestContextParser { throw new IllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); } Template template = Template.parse(parser, queryParserService.parseFieldMatcher()); - CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH); + CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH, + headersContext); suggestion.setCollateQueryScript(compiledScript); } else if ("params".equals(fieldName)) { suggestion.setCollateScriptParams(parser.map()); @@ -162,7 +165,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); } } - + if (suggestion.getField() == null) { throw new IllegalArgumentException("The required field option is missing"); } @@ -178,11 +181,11 @@ public final class PhraseSuggestParser implements SuggestContextParser { suggestion.setAnalyzer(fieldType.searchAnalyzer()); } } - + if (suggestion.model() == null) { suggestion.setModel(StupidBackoffScorer.FACTORY); } - + if (!gramSizeSet || suggestion.generators().isEmpty()) { final ShingleTokenFilterFactory.Factory shingleFilterFactory = SuggestUtils.getShingleFilterFactory(suggestion.getAnalyzer()); if (!gramSizeSet) { @@ -204,9 +207,9 @@ public final class PhraseSuggestParser implements SuggestContextParser { suggestion.addGenerator(generator); } } - - - + + + return suggestion; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index ac990deffb7..d5e942b52e1 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -20,11 +20,16 @@ package org.elasticsearch.search.suggest.phrase; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.script.Template; import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.Map.Entry; +import java.util.Set; /** * Defines the actual suggest command for phrase suggestions ( phrase). 
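The collate branch above now passes the request's HasContextAndHeaders into ScriptService.compile(template, ScriptContext.Standard.SEARCH, headersContext), so compilation sees the original request headers instead of relying on ambient state. The types in this sketch are hypothetical stand-ins, not the Elasticsearch API; only the parameter-threading shape mirrors the patch:

---------------------------------------------------------------------------
// Hypothetical types, including the "script.sandbox" header name.
interface HeaderSource {
    String getHeader(String key);
}

final class CompiledScript {
    final String source;
    final boolean sandboxed;

    CompiledScript(String source, boolean sandboxed) {
        this.source = source;
        this.sandboxed = sandboxed;
    }
}

final class ScriptCompiler {
    // Compilation decisions can depend on per-request headers, so the
    // caller hands its header holder down rather than using a thread-local.
    CompiledScript compile(String source, HeaderSource headers) {
        boolean sandboxed = !"false".equals(headers.getHeader("script.sandbox"));
        return new CompiledScript(source, sandboxed);
    }
}
---------------------------------------------------------------------------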
@@ -41,7 +46,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder collateParams; private Boolean collatePrune; @@ -67,7 +72,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder>=1 as an absolute number of query terms. - * + * The default is set to 1.0 which means that only * corrections with at most 1 misspelled term are returned. */ @@ -131,13 +136,13 @@ public final class PhraseSuggestionBuilder extends SuggestionBuildertrue the phrase suggester will fail if the analyzer only * produces ngrams. the default is true. */ public PhraseSuggestionBuilder forceUnigrams(boolean forceUnigrams) { - this.forceUnigrams = forceUnigrams; + this.forceUnigrams = forceUnigrams; return this; } @@ -149,7 +154,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilderadditive - * smoothing model. + * smoothing model. *

* See N-Gram @@ -304,7 +317,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder } } - private final ImmutableList entries; + private final List entries; public IndexWarmersMetaData(Entry... entries) { - this.entries = ImmutableList.copyOf(entries); + this.entries = Arrays.asList(entries); } - public ImmutableList entries() { + public List entries() { return this.entries; } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java index 548a8d2a55b..cc9eeb04130 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -28,6 +27,8 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -39,7 +40,7 @@ public class RestoreInfo implements ToXContent, Streamable { private String name; - private ImmutableList indices; + private List indices; private int totalShards; @@ -49,7 +50,7 @@ public class RestoreInfo implements ToXContent, Streamable { } - public RestoreInfo(String name, ImmutableList indices, int totalShards, int successfulShards) { + public RestoreInfo(String name, List indices, int totalShards, int successfulShards) { this.name = name; this.indices = indices; this.totalShards = totalShards; @@ -147,11 +148,11 @@ public class RestoreInfo implements ToXContent, Streamable { public void readFrom(StreamInput in) throws IOException { name = in.readString(); int size = in.readVInt(); - ImmutableList.Builder indicesListBuilder = ImmutableList.builder(); + List indicesListBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { indicesListBuilder.add(in.readString()); } - indices = indicesListBuilder.build(); + indices = Collections.unmodifiableList(indicesListBuilder); totalShards = in.readVInt(); successfulShards = in.readVInt(); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 8738de1d948..cdaaafc1dad 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import org.elasticsearch.Version; @@ -80,6 +79,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -295,7 +295,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } shards = shardsBuilder.build(); - RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshotId, RestoreInProgress.State.INIT, ImmutableList.copyOf(renamedIndices.keySet()), shards); + 
RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshotId, RestoreInProgress.State.INIT, Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards); builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry)); } else { shards = ImmutableMap.of(); @@ -308,7 +308,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis if (completed(shards)) { // We don't have any indices to restore - we are done - restoreInfo = new RestoreInfo(request.name(), ImmutableList.copyOf(renamedIndices.keySet()), + restoreInfo = new RestoreInfo(request.name(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards.size(), shards.size() - failedShards(shards)); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java index 05429eab850..75abc40af61 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java +++ b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseFieldMatcher; @@ -57,7 +56,7 @@ public class Snapshot implements Comparable, ToXContent, FromXContentB private final List shardFailures; - private final static List NO_FAILURES = ImmutableList.of(); + private final static List NO_FAILURES = Collections.emptyList(); public final static Snapshot PROTO = new Snapshot(); @@ -287,7 +286,7 @@ public class Snapshot implements Comparable, ToXContent, FromXContentB Version version = Version.CURRENT; SnapshotState state = SnapshotState.IN_PROGRESS; String reason = null; - ImmutableList indices = ImmutableList.of(); + List indices = Collections.emptyList(); long startTime = 0; long endTime = 0; int totalShard = 0; @@ -331,13 +330,13 @@ public class Snapshot implements Comparable, ToXContent, FromXContentB while (parser.nextToken() != XContentParser.Token.END_ARRAY) { indicesArray.add(parser.text()); } - indices = ImmutableList.copyOf(indicesArray); + indices = Collections.unmodifiableList(indicesArray); } else if ("failures".equals(currentFieldName)) { ArrayList shardFailureArrayList = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { shardFailureArrayList.add(SnapshotShardFailure.fromXContent(parser)); } - shardFailures = ImmutableList.copyOf(shardFailureArrayList); + shardFailures = Collections.unmodifiableList(shardFailureArrayList); } else { // It was probably created by newer version - ignoring parser.skipChildren(); diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index a54b1b3b78e..e7b6ce13f12 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -19,6 +19,8 @@ package org.elasticsearch.snapshots; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.elasticsearch.Version; @@ -32,7 +34,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.rest.RestStatus; -import com.google.common.collect.ImmutableList; /** * Information about snapshot 
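RestoreService here, and RestoreInfo and Snapshot around it, replace Guava's ImmutableList.Builder with a plain ArrayList sealed by Collections.unmodifiableList. A JDK-only sketch of that read-then-seal idiom, with java.io.DataInput standing in for Elasticsearch's StreamInput:

---------------------------------------------------------------------------
import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class ReadListSketch {
    // Build privately, then publish an unmodifiable view.
    static List<String> readStrings(DataInput in, int size) throws IOException {
        List<String> list = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            list.add(in.readUTF());
        }
        return Collections.unmodifiableList(list);
    }
}
---------------------------------------------------------------------------

Unlike ImmutableList, unmodifiableList is only a view, so the idiom is safe only while the backing ArrayList never escapes; in these hunks it never does.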
@@ -260,11 +261,11 @@ public class SnapshotInfo implements ToXContent, Streamable { public void readFrom(StreamInput in) throws IOException { name = in.readString(); int size = in.readVInt(); - ImmutableList.Builder indicesListBuilder = ImmutableList.builder(); + List indicesListBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { indicesListBuilder.add(in.readString()); } - indices = indicesListBuilder.build(); + indices = Collections.unmodifiableList(indicesListBuilder); state = SnapshotState.fromValue(in.readByte()); reason = in.readOptionalString(); startTime = in.readVLong(); @@ -273,13 +274,13 @@ public class SnapshotInfo implements ToXContent, Streamable { successfulShards = in.readVInt(); size = in.readVInt(); if (size > 0) { - ImmutableList.Builder failureBuilder = ImmutableList.builder(); + List failureBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { failureBuilder.add(SnapshotShardFailure.readSnapshotShardFailure(in)); } - shardFailures = failureBuilder.build(); + shardFailures = Collections.unmodifiableList(failureBuilder); } else { - shardFailures = ImmutableList.of(); + shardFailures = Collections.emptyList(); } version = Version.readVersion(in); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java index 84e29063ddc..0f76fbdfc7f 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java @@ -18,11 +18,13 @@ */ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.IndexNotFoundException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -114,8 +116,8 @@ public class SnapshotUtils { } } if (result == null) { - return ImmutableList.copyOf(selectedIndices); + return Collections.unmodifiableList(new ArrayList<>(Arrays.asList(selectedIndices))); } - return ImmutableList.copyOf(result); + return Collections.unmodifiableList(new ArrayList<>(result)); } } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 79f6e863f6a..1e19633b02a 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; @@ -63,6 +62,8 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -150,7 +151,7 @@ public class SnapshotsService extends AbstractLifecycleComponent snapshotList = new ArrayList<>(snapshotSet); CollectionUtil.timSort(snapshotList); - return ImmutableList.copyOf(snapshotList); + return Collections.unmodifiableList(snapshotList); } /** @@ -166,7 +167,7 @@ public class SnapshotsService extends AbstractLifecycleComponent indices = ImmutableList.copyOf(indexNameExpressionResolver.concreteIndices(currentState, 
request.indicesOptions(), request.indices())); + List indices = Arrays.asList(indexNameExpressionResolver.concreteIndices(currentState, request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices); newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), State.INIT, indices, System.currentTimeMillis(), null); snapshots = new SnapshotsInProgress(newSnapshot); @@ -311,7 +312,7 @@ public class SnapshotsService extends AbstractLifecycleComponent entries = ImmutableList.builder(); + List entries = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshots.entries()) { if (entry.snapshotId().equals(snapshot.snapshotId())) { // Replace the snapshot that was just created @@ -348,7 +349,7 @@ public class SnapshotsService extends AbstractLifecycleComponentof()); + snapshot.snapshotId(), snapshot.indices(), snapshot.startTime(), ExceptionsHelper.detailedMessage(t), 0, Collections.emptyList()); } catch (Throwable t2) { logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshotId()); } @@ -387,7 +388,7 @@ public class SnapshotsService extends AbstractLifecycleComponentof()); + ExceptionsHelper.detailedMessage(t), 0, Collections.emptyList()); } catch (Throwable t2) { logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshotId()); } @@ -413,7 +414,7 @@ public class SnapshotsService extends AbstractLifecycleComponent currentSnapshots(String repository, String[] snapshots) { SnapshotsInProgress snapshotsInProgress = clusterService.state().custom(SnapshotsInProgress.TYPE); if (snapshotsInProgress == null || snapshotsInProgress.entries().isEmpty()) { - return ImmutableList.of(); + return Collections.emptyList(); } if ("_all".equals(repository)) { return snapshotsInProgress.entries(); @@ -423,7 +424,7 @@ public class SnapshotsService extends AbstractLifecycleComponent 0) { for (String snapshot : snapshots) { @@ -431,12 +432,12 @@ public class SnapshotsService extends AbstractLifecycleComponent builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { if (!entry.snapshotId().getRepository().equals(repository)) { continue; @@ -452,7 +453,7 @@ public class SnapshotsService extends AbstractLifecycleComponent shards(ClusterState clusterState, ImmutableList indices) { + private ImmutableMap shards(ClusterState clusterState, List indices) { ImmutableMap.Builder builder = ImmutableMap.builder(); MetaData metaData = clusterState.metaData(); for (String index : indices) { diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java index 8b180dab5fb..af53482c629 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -35,7 +36,7 @@ import java.util.List; */ public class ThreadPoolStats implements Streamable, ToXContent, Iterable { - public static class Stats implements Streamable, ToXContent { + public static class Stats implements Streamable, ToXContent, Comparable { private String name; private int threads; @@ -133,6 +134,23 @@ public class ThreadPoolStats 
implements Streamable, ToXContent, Iterable stats; @@ -142,6 +160,7 @@ public class ThreadPoolStats implements Streamable, ToXContent, Iterable stats) { + Collections.sort(stats); this.stats = stats; } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 6b13ddd82bc..3103d3c86f3 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -23,7 +23,6 @@ import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -58,13 +57,31 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.BytesTransportRequest; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NodeNotConnectedException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.support.TransportStatus; import org.jboss.netty.bootstrap.ClientBootstrap; import org.jboss.netty.bootstrap.ServerBootstrap; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.*; +import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelFutureListener; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.ChannelPipelineFactory; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.channel.ExceptionEvent; +import org.jboss.netty.channel.FixedReceiveBufferSizePredictorFactory; +import org.jboss.netty.channel.ReceiveBufferSizePredictorFactory; import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; import org.jboss.netty.channel.socket.nio.NioWorkerPool; @@ -78,8 +95,20 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; import java.net.UnknownHostException; import java.nio.channels.CancelledKeyException; -import java.util.*; -import java.util.concurrent.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; 
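Just above, ThreadPoolStats.Stats gains a compareTo implementation (the generic parameters were stripped when this patch was flattened; the class implements Comparable of Stats) and the ThreadPoolStats constructor sorts the list so the rendered stats come out in a stable order. A self-contained sketch with one plausible ordering, by name with nulls first; the actual comparator in the commit may differ:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class PoolStat implements Comparable<PoolStat> {
    final String name;
    final int threads;

    PoolStat(String name, int threads) {
        this.name = name;
        this.threads = threads;
    }

    @Override
    public int compareTo(PoolStat other) {
        if (name == null) {
            return other.name == null ? 0 : -1; // null names sort first
        }
        if (other.name == null) {
            return 1;
        }
        return name.compareTo(other.name);
    }

    public static void main(String[] args) {
        List<PoolStat> stats = new ArrayList<>();
        stats.add(new PoolStat("search", 12));
        stats.add(new PoolStat("bulk", 4));
        stats.add(new PoolStat(null, 1));
        Collections.sort(stats); // deterministic order for serialized output
        for (PoolStat s : stats) {
            System.out.println(s.name + " -> " + s.threads);
        }
    }
}
---------------------------------------------------------------------------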
import java.util.concurrent.locks.ReadWriteLock; @@ -947,7 +976,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem } } catch (RuntimeException e) { // clean the futures - for (ChannelFuture future : ImmutableList.builder().add(connectRecovery).add(connectBulk).add(connectReg).add(connectState).add(connectPing).build()) { + List futures = new ArrayList<>(); + futures.addAll(Arrays.asList(connectRecovery)); + futures.addAll(Arrays.asList(connectBulk)); + futures.addAll(Arrays.asList(connectReg)); + futures.addAll(Arrays.asList(connectState)); + futures.addAll(Arrays.asList(connectPing)); + for (ChannelFuture future : Collections.unmodifiableList(futures)) { future.cancel(); if (future.getChannel() != null && future.getChannel().isOpen()) { try { @@ -1130,7 +1165,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem public static class NodeChannels { - ImmutableList allChannels = ImmutableList.of(); + List allChannels = Collections.emptyList(); private Channel[] recovery; private final AtomicInteger recoveryCounter = new AtomicInteger(); private Channel[] bulk; @@ -1151,7 +1186,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem } public void start() { - this.allChannels = ImmutableList.builder().add(recovery).add(bulk).add(reg).add(state).add(ping).build(); + List newAllChannels = new ArrayList<>(); + newAllChannels.addAll(Arrays.asList(recovery)); + newAllChannels.addAll(Arrays.asList(bulk)); + newAllChannels.addAll(Arrays.asList(reg)); + newAllChannels.addAll(Arrays.asList(state)); + newAllChannels.addAll(Arrays.asList(ping)); + this.allChannels = Collections.unmodifiableList(newAllChannels); } public boolean hasChannel(Channel channel) { diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index c22638268ed..65349bde389 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -28,8 +28,8 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexAction; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; @@ -42,8 +42,8 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeAction; import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import 
org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; @@ -85,6 +85,7 @@ import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -95,35 +96,18 @@ import org.elasticsearch.search.action.SearchServiceTransportAction; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; import org.junit.After; import org.junit.Before; import org.junit.Test; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.concurrent.Callable; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.emptyIterable; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.*; @ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2) public class IndicesRequestIT extends ESIntegTestCase { @@ -390,14 +374,15 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testFlush() { - String flushShardAction = FlushAction.NAME + "[s]"; - interceptTransportActions(flushShardAction); + String[] indexShardActions = new String[]{TransportShardFlushAction.NAME + "[r]", TransportShardFlushAction.NAME}; + interceptTransportActions(indexShardActions); FlushRequest flushRequest = new FlushRequest(randomIndicesOrAliases()); internalCluster().clientNodeClient().admin().indices().flush(flushRequest).actionGet(); clearInterceptedActions(); - assertSameIndices(flushRequest, flushShardAction); + String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndices(client().admin().cluster().prepareState().get().getState(), flushRequest); + assertIndicesSubset(Arrays.asList(indices), indexShardActions); } @Test @@ -414,14 +399,15 @@ public class IndicesRequestIT extends ESIntegTestCase { @Test public void testRefresh() { - String refreshShardAction = RefreshAction.NAME + "[s]"; - interceptTransportActions(refreshShardAction); + String[] indexShardActions = new String[]{TransportShardRefreshAction.NAME + "[r]", TransportShardRefreshAction.NAME}; + interceptTransportActions(indexShardActions); RefreshRequest refreshRequest = new RefreshRequest(randomIndicesOrAliases()); 
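testFlush (and testRefresh just below) now intercepts the shard-level actions, TransportShardFlushAction.NAME and its "[r]" replica variant, and checks them against the concrete indices that IndexNameExpressionResolver resolves, because flush and refresh have become broadcast actions that fan out one replication request per shard and then combine the per-shard results. A toy model of that fan-out-and-combine step, with plain threads standing in for the transport layer:

---------------------------------------------------------------------------
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

final class FanOutSketch {
    public static void main(String[] args) throws InterruptedException {
        int shards = 3;
        AtomicInteger succeeded = new AtomicInteger();
        List<Exception> failures = new CopyOnWriteArrayList<>();
        CountDownLatch done = new CountDownLatch(shards);

        for (int shard = 0; shard < shards; shard++) {
            final int id = shard;
            new Thread(() -> {
                try {
                    if (id % 2 == 0) {
                        succeeded.incrementAndGet(); // pretend shard succeeded
                    } else {
                        throw new RuntimeException("shard " + id + " failed");
                    }
                } catch (Exception e) {
                    failures.add(e);
                } finally {
                    done.countDown(); // combine only after every shard reports
                }
            }).start();
        }

        done.await();
        System.out.println("succeeded=" + succeeded + " failed=" + failures.size());
    }
}
---------------------------------------------------------------------------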
internalCluster().clientNodeClient().admin().indices().refresh(refreshRequest).actionGet(); clearInterceptedActions(); - assertSameIndices(refreshRequest, refreshShardAction); + String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndices(client().admin().cluster().prepareState().get().getState(), refreshRequest); + assertIndicesSubset(Arrays.asList(indices), indexShardActions); } @Test diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java index cf03bf798d5..803262f8292 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java @@ -61,7 +61,8 @@ public class FlushBlocksIT extends ESIntegTestCase { for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); - assertBlocked(client().admin().indices().prepareFlush("test")); + FlushResponse flushResponse = client().admin().indices().prepareFlush("test").get(); + assertBlocked(flushResponse); } finally { disableIndexBlock("test", blockSetting); } @@ -74,7 +75,7 @@ public class FlushBlocksIT extends ESIntegTestCase { assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); setClusterReadOnly(true); - assertBlocked(client().admin().indices().prepareFlush()); + assertBlocked(client().admin().indices().prepareFlush().get()); } finally { setClusterReadOnly(false); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index ce214dd5a9d..9484c5e07f1 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -239,10 +238,10 @@ public class GetIndexIT extends ESIntegTestCase { } private void assertWarmers(GetIndexResponse response, String indexName) { - ImmutableOpenMap> warmers = response.warmers(); + ImmutableOpenMap> warmers = response.warmers(); assertThat(warmers, notNullValue()); assertThat(warmers.size(), equalTo(1)); - ImmutableList indexWarmers = warmers.get(indexName); + List indexWarmers = warmers.get(indexName); assertThat(indexWarmers, notNullValue()); assertThat(indexWarmers.size(), equalTo(1)); Entry warmer = indexWarmers.get(0); @@ -297,10 +296,10 @@ public class GetIndexIT extends ESIntegTestCase { } private void assertAliases(GetIndexResponse response, String indexName) { - ImmutableOpenMap> aliases = response.aliases(); + ImmutableOpenMap> aliases = response.aliases(); assertThat(aliases, notNullValue()); assertThat(aliases.size(), equalTo(1)); - ImmutableList indexAliases = aliases.get(indexName); + List indexAliases = aliases.get(indexName); assertThat(indexAliases, notNullValue()); assertThat(indexAliases.size(), equalTo(1)); AliasMetaData alias = indexAliases.get(0); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksIT.java 
b/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksIT.java index 06c19564c3e..6b6e663b293 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksIT.java @@ -74,7 +74,7 @@ public class OptimizeBlocksIT extends ESIntegTestCase { assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); setClusterReadOnly(true); - assertBlocked(client().admin().indices().prepareFlush()); + assertBlocked(client().admin().indices().prepareOptimize()); } finally { setClusterReadOnly(false); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java index 65c37c686b7..5815ce8c266 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java @@ -57,7 +57,7 @@ public class RefreshBlocksIT extends ESIntegTestCase { for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); - assertBlocked(client().admin().indices().prepareRefresh("test")); + assertBlocked(client().admin().indices().prepareRefresh("test").get()); } finally { disableIndexBlock("test", blockSetting); } @@ -70,7 +70,7 @@ public class RefreshBlocksIT extends ESIntegTestCase { assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); setClusterReadOnly(true); - assertBlocked(client().admin().indices().prepareRefresh()); + assertBlocked(client().admin().indices().prepareRefresh().get()); } finally { setClusterReadOnly(false); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java index 12b61e2c2a5..925e01e8f2e 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.shards; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -42,7 +41,7 @@ public class IndicesShardStoreResponseTest extends ESTestCase { @Test public void testBasicSerialization() throws Exception { ImmutableOpenMap.Builder>> indexStoreStatuses = ImmutableOpenMap.builder(); - ImmutableList.Builder failures = ImmutableList.builder(); + List failures = new ArrayList<>(); ImmutableOpenIntMap.Builder> storeStatuses = ImmutableOpenIntMap.builder(); DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); @@ -59,7 +58,7 @@ public class IndicesShardStoreResponseTest extends ESTestCase { failures.add(new IndicesShardStoresResponse.Failure("node1", "test", 3, new NodeDisconnectedException(node1, ""))); - IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), failures.build()); + IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), Collections.unmodifiableList(failures)); XContentBuilder contentBuilder = 
XContentFactory.jsonBuilder(); contentBuilder.startObject(); storesResponse.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS); diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index d2533eaf2da..5fd9baea068 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.common.xcontent.ToXContent; @@ -29,19 +28,16 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Test; import java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - */ public class MultiSearchRequestTests extends ESTestCase { @Test public void simpleAdd() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); assertThat(request.requests().size(), equalTo(8)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); @@ -67,7 +63,7 @@ public class MultiSearchRequestTests extends ESTestCase { @Test public void simpleAdd2() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); assertThat(request.requests().size(), equalTo(5)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).types().length, equalTo(0)); @@ -81,11 +77,11 @@ public class MultiSearchRequestTests extends ESTestCase { assertThat(request.requests().get(4).indices(), nullValue()); assertThat(request.requests().get(4).types().length, equalTo(0)); } - + @Test public void simpleAdd3() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); assertThat(request.requests().size(), equalTo(4)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); @@ -104,7 +100,28 @@ public class MultiSearchRequestTests extends ESTestCase { @Test public void simpleAdd4() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, 
data.length, false, null, null, null); + assertThat(request.requests().size(), equalTo(3)); + assertThat(request.requests().get(0).indices()[0], equalTo("test0")); + assertThat(request.requests().get(0).indices()[1], equalTo("test1")); + assertThat(request.requests().get(0).requestCache(), equalTo(true)); + assertThat(request.requests().get(0).preference(), nullValue()); + assertThat(request.requests().get(1).indices()[0], equalTo("test2")); + assertThat(request.requests().get(1).indices()[1], equalTo("test3")); + assertThat(request.requests().get(1).types()[0], equalTo("type1")); + assertThat(request.requests().get(1).requestCache(), nullValue()); + assertThat(request.requests().get(1).preference(), equalTo("_local")); + assertThat(request.requests().get(2).indices()[0], equalTo("test4")); + assertThat(request.requests().get(2).indices()[1], equalTo("test1")); + assertThat(request.requests().get(2).types()[0], equalTo("type2")); + assertThat(request.requests().get(2).types()[1], equalTo("type1")); + assertThat(request.requests().get(2).routing(), equalTo("123")); + } + + @Test + public void simpleAdd5() throws Exception { + byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch5.json"); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, true, null, null, null); assertThat(request.requests().size(), equalTo(3)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java new file mode 100644 index 00000000000..4ab6122365e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -0,0 +1,317 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.cluster.TestClusterService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.*; +import static org.hamcrest.Matchers.*; + +public class BroadcastReplicationTests extends ESTestCase { + + private static ThreadPool threadPool; + private TestClusterService clusterService; + private TransportService transportService; + private LocalTransport transport; + private TestBroadcastReplicationAction broadcastReplicationAction; + + @BeforeClass + public static void beforeClass() { + threadPool = new ThreadPool("BroadcastReplicationTests"); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new LocalTransport(Settings.EMPTY, threadPool, Version.CURRENT, new NamedWriteableRegistry()); + clusterService = new TestClusterService(threadPool); + transportService = new TransportService(transport, threadPool); + transportService.start(); + broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new 
ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), null); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + @Test + public void testNotStartedPrimary() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + clusterService.setState(state(index, randomBoolean(), + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Future response = (broadcastReplicationAction.execute(new BroadcastRequest().indices(index))); + for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { + shardRequests.v2().onFailure(new UnavailableShardsException(shardId, "test exception expected")); + } + response.get(); + logger.info("total shards: {}, ", response.get().getTotalShards()); + // we expect no failures here because UnavailableShardsException does not count as failed + assertBroadcastResponse(2, 0, 0, response.get(), null); + } + + @Test + public void testStartedPrimary() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + clusterService.setState(state(index, randomBoolean(), + ShardRoutingState.STARTED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Future response = (broadcastReplicationAction.execute(new BroadcastRequest().indices(index))); + for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { + ActionWriteResponse actionWriteResponse = new ActionWriteResponse(); + actionWriteResponse.setShardInfo(new ActionWriteResponse.ShardInfo(1, 1, new ActionWriteResponse.ShardInfo.Failure[0])); + shardRequests.v2().onResponse(actionWriteResponse); + } + logger.info("total shards: {}, ", response.get().getTotalShards()); + assertBroadcastResponse(1, 1, 0, response.get(), null); + } + + @Test + public void testResultCombine() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + int numShards = randomInt(3); + clusterService.setState(stateWithAssignedPrimariesAndOneReplica(index, numShards)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Future response = (broadcastReplicationAction.execute(new BroadcastRequest().indices(index))); + int succeeded = 0; + int failed = 0; + for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { + if (randomBoolean()) { + ActionWriteResponse.ShardInfo.Failure[] failures = new ActionWriteResponse.ShardInfo.Failure[0]; + int shardsSucceeded = randomInt(1) + 1; + succeeded += shardsSucceeded; + ActionWriteResponse actionWriteResponse = new ActionWriteResponse(); + if (shardsSucceeded == 1 && randomBoolean()) { + //sometimes add failure (no failure means shard unavailable) + failures = new ActionWriteResponse.ShardInfo.Failure[1]; + failures[0] = new ActionWriteResponse.ShardInfo.Failure(index, shardRequests.v1().id(), null, new Exception("pretend shard failed"), RestStatus.GATEWAY_TIMEOUT, false); + failed++; + } + actionWriteResponse.setShardInfo(new ActionWriteResponse.ShardInfo(2, shardsSucceeded, failures)); + shardRequests.v2().onResponse(actionWriteResponse); + } else { + // sometimes fail + failed += 2; + // just add a general exception and see if failed shards will be incremented by 2 + 
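Throughout these tests TestBroadcastReplicationAction never touches the wire: its shardExecute only records the (ShardId, listener) tuple, and the test thread later completes each captured listener by hand with a success, a per-shard failure, or a thrown exception, which is what makes the assertions deterministic. The same capture-then-complete trick in miniature, with java.util.function.Consumer standing in for ActionListener:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class CapturedCallbacksSketch {
    // The "action" under test records callbacks instead of doing real I/O.
    static final List<Consumer<String>> captured = new ArrayList<>();

    static void shardExecute(Consumer<String> listener) {
        captured.add(listener);
    }

    public static void main(String[] args) {
        StringBuilder log = new StringBuilder();
        shardExecute(log::append);
        shardExecute(log::append);

        // The test, not a network thread, decides when and how each completes.
        captured.get(0).accept("shard0-ok;");
        captured.get(1).accept("shard1-ok;");
        System.out.println(log); // shard0-ok;shard1-ok;
    }
}
---------------------------------------------------------------------------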
shardRequests.v2().onFailure(new Exception("pretend shard failed")); + } + } + assertBroadcastResponse(2 * numShards, succeeded, failed, response.get(), Exception.class); + } + + @Test + public void testNoShards() throws InterruptedException, ExecutionException, IOException { + clusterService.setState(stateWithNoShard()); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + BroadcastResponse response = executeAndAssertImmediateResponse(broadcastReplicationAction, new BroadcastRequest()); + assertBroadcastResponse(0, 0, 0, response, null); + } + + @Test + public void testShardsList() throws InterruptedException, ExecutionException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + ClusterState clusterState = state(index, randomBoolean(), + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + List<ShardId> shards = broadcastReplicationAction.shards(new BroadcastRequest().indices(shardId.index().name()), clusterState); + assertThat(shards.size(), equalTo(1)); + assertThat(shards.get(0), equalTo(shardId)); + } + + private class TestBroadcastReplicationAction extends TransportBroadcastReplicationAction<BroadcastRequest, BroadcastResponse, ReplicationRequest, ActionWriteResponse> { + protected final Set<Tuple<ShardId, ActionListener<ActionWriteResponse>>> capturedShardRequests = ConcurrentCollections.newConcurrentSet(); + + public TestBroadcastReplicationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) { + super("test-broadcast-replication-action", BroadcastRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedBroadcastShardAction); + } + + @Override + protected ActionWriteResponse newShardResponse() { + return new ActionWriteResponse(); + } + + @Override + protected ReplicationRequest newShardRequest(BroadcastRequest request, ShardId shardId) { + return new ReplicationRequest().setShardId(shardId); + } + + @Override + protected BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) { + return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); + } + + @Override + protected void shardExecute(BroadcastRequest request, ShardId shardId, ActionListener<ActionWriteResponse> shardActionListener) { + capturedShardRequests.add(new Tuple<>(shardId, shardActionListener)); + } + + protected void clearCapturedRequests() { + capturedShardRequests.clear(); + } + } + + public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) throws InterruptedException, ExecutionException { + Date beginDate = new Date(); + FlushResponse flushResponse = flushAction.execute(new FlushRequest(index)).get(); + Date endDate = new Date(); + long maxTime = 500; + assertThat("this should not take longer than " + maxTime + " ms. The request hangs somewhere", endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime)); + return flushResponse; + } + + @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/13238") + public void testTimeoutFlush() throws ExecutionException, InterruptedException { + + final String index = "test"; + clusterService.setState(state(index, randomBoolean(), + randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + TransportShardFlushAction shardFlushAction = new TransportShardFlushAction(Settings.EMPTY, transportService, clusterService, + null, threadPool, null, + null, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY)); + TransportFlushAction flushAction = new TransportFlushAction(Settings.EMPTY, threadPool, clusterService, + transportService, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), + shardFlushAction); + FlushResponse flushResponse = (FlushResponse) executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)); + logger.info("total shards: {}, ", flushResponse.getTotalShards()); + assertBroadcastResponse(2, 0, 0, flushResponse, UnavailableShardsException.class); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)), ClusterBlockException.class); + } + + void assertFailure(String msg, BroadcastResponse broadcastResponse, Class klass) throws InterruptedException { + assertThat(broadcastResponse.getSuccessfulShards(), equalTo(0)); + assertThat(broadcastResponse.getTotalShards(), equalTo(broadcastResponse.getFailedShards())); + for (int i = 0; i < broadcastResponse.getFailedShards(); i++) { + assertThat(msg, broadcastResponse.getShardFailures()[i].getCause().getCause(), instanceOf(klass)); + } + } + + @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/13238") + public void testTimeoutRefresh() throws ExecutionException, InterruptedException { + + final String index = "test"; + clusterService.setState(state(index, randomBoolean(), + randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + TransportShardRefreshAction shardrefreshAction = new TransportShardRefreshAction(Settings.EMPTY, transportService, clusterService, + null, threadPool, null, + null, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY)); + TransportRefreshAction refreshAction = new TransportRefreshAction(Settings.EMPTY, threadPool, clusterService, + transportService, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), + shardrefreshAction); + RefreshResponse refreshResponse = (RefreshResponse) executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)); + assertBroadcastResponse(2, 0, 0, refreshResponse, UnavailableShardsException.class); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)), ClusterBlockException.class); + } + + public BroadcastResponse executeAndAssertImmediateResponse(TransportBroadcastReplicationAction broadcastAction, BroadcastRequest request) throws InterruptedException, ExecutionException { + return (BroadcastResponse) broadcastAction.execute(request).actionGet("5s"); + } + + private void assertBroadcastResponse(int total, int successful, int failed, BroadcastResponse response, Class exceptionClass) { + assertThat(response.getSuccessfulShards(), equalTo(successful)); + assertThat(response.getTotalShards(), equalTo(total)); + assertThat(response.getFailedShards(), equalTo(failed)); + for (int i = 0; i < failed; i++) { + assertThat(response.getShardFailures()[i].getCause().getCause(), instanceOf(exceptionClass)); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java new file mode 100644 index 00000000000..e5143a3ef09 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -0,0 +1,230 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.shard.ShardId; + +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; + +/** + * Helper methods for generating cluster states + */ +public class ClusterStateCreationUtils { + + + /** + * Creates cluster state with an index that has one shard and #(replicaStates) replicas + * + * @param index name of the index + * @param primaryLocal if primary should coincide with the local node in the cluster state + * @param primaryState state of primary + * @param replicaStates states of the replicas. length of this array determines also the number of replicas + */ + public static ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... 
replicaStates) { + final int numberOfReplicas = replicaStates.length; + + int numberOfNodes = numberOfReplicas + 1; + if (primaryState == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + for (ShardRoutingState state : replicaStates) { + if (state == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + } + numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures + final ShardId shardId = new ShardId(index, 0); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set<String> unassignedNodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes + 1; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + unassignedNodes.add(node.id()); + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + + String primaryNode = null; + String relocatingNode = null; + UnassignedInfo unassignedInfo = null; + if (primaryState != ShardRoutingState.UNASSIGNED) { + if (primaryLocal) { + primaryNode = newNode(0).id(); + unassignedNodes.remove(primaryNode); + } else { + primaryNode = selectAndRemove(unassignedNodes); + } + if (primaryState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } else { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); + } + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, 0, unassignedInfo)); + + for (ShardRoutingState replicaState : replicaStates) { + String replicaNode = null; + relocatingNode = null; + unassignedInfo = null; + if (replicaState != ShardRoutingState.UNASSIGNED) { + assert primaryNode != null : "a replica is assigned but the primary isn't"; + replicaNode = selectAndRemove(unassignedNodes); + if (replicaState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } else { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); + } + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, 0, unassignedInfo)); + } + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build()))); + return state.build(); + } + + /** + * Creates cluster state with several shards and one replica and all shards STARTED. 
+ */ + public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, int numberOfShards) { + + int numberOfNodes = 2; // we need a non-local master to test shard failures + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < numberOfNodes + 1; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (int i = 0; i < numberOfShards; i++) { + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + final ShardId shardId = new ShardId(index, i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true, ShardRoutingState.STARTED, 0, null)); + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false, ShardRoutingState.STARTED, 0, null)); + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + } + state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder)); + return state.build(); + } + + /** + * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas. + * Primary will be STARTED in cluster state but replicas will be one of UNASSIGNED, INITIALIZING, STARTED or RELOCATING. + * + * @param index name of the index + * @param primaryLocal if primary should coincide with the local node in the cluster state + * @param numberOfReplicas number of replicas + */ + public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) { + int assignedReplicas = randomIntBetween(0, numberOfReplicas); + return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas); + } + + /** + * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas. + * Primary will be STARTED in cluster state. Some (unassignedReplicas) will be UNASSIGNED and + * some (assignedReplicas) will be one of INITIALIZING, STARTED or RELOCATING. + * + * @param index name of the index + * @param primaryLocal if primary should coincide with the local node in the cluster state + * @param assignedReplicas number of replicas that should have INITIALIZING, STARTED or RELOCATING state + * @param unassignedReplicas number of replicas that should be unassigned + */ + public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) { + ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas]; + // no point in randomizing - node assignment later on does it too. 
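+ // (actual node placement is randomized inside state(), which draws target nodes from the unassigned pool via selectAndRemove)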
+ for (int i = 0; i < assignedReplicas; i++) { + replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); + } + for (int i = assignedReplicas; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.UNASSIGNED; + } + return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); + } + + /** + * Creates a cluster state with no index + */ + public static ClusterState stateWithNoShard() { + int numberOfNodes = 2; + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set<String> unassignedNodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes + 1; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + unassignedNodes.add(node.id()); + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(1).id()); + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().generateClusterUuidIfNeeded()); + state.routingTable(RoutingTable.builder()); + return state.build(); + } + + private static DiscoveryNode newNode(int nodeId) { + return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT); + } + + static private String selectAndRemove(Set<String> strings) { + String selection = randomFrom(strings.toArray(new String[strings.size()])); + strings.remove(selection); + return selection; + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java index d7fb2ddd54d..0b9e254b0ad 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java @@ -77,6 +77,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.hamcrest.Matchers.*; @@ -98,6 +100,7 @@ public class ShardReplicationTests extends ESTestCase { threadPool = new ThreadPool("ShardReplicationTests"); } + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -161,103 +164,6 @@ public class ShardReplicationTests extends ESTestCase { assertEquals(1, count.get()); } - ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) { - int assignedReplicas = randomIntBetween(0, numberOfReplicas); - return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas); - } - - ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) { - ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas]; - // no point in randomizing - node assignment later on does it too. 
- for (int i = 0; i < assignedReplicas; i++) { - replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); - } - for (int i = assignedReplicas; i < replicaStates.length; i++) { - replicaStates[i] = ShardRoutingState.UNASSIGNED; - } - return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); - } - - ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) { - final int numberOfReplicas = replicaStates.length; - - int numberOfNodes = numberOfReplicas + 1; - if (primaryState == ShardRoutingState.RELOCATING) { - numberOfNodes++; - } - for (ShardRoutingState state : replicaStates) { - if (state == ShardRoutingState.RELOCATING) { - numberOfNodes++; - } - } - numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures - final ShardId shardId = new ShardId(index, 0); - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - Set<String> unassignedNodes = new HashSet<>(); - for (int i = 0; i < numberOfNodes + 1; i++) { - final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); - unassignedNodes.add(node.id()); - } - discoBuilder.localNodeId(newNode(0).id()); - discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures - IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() - .put(SETTING_VERSION_CREATED, Version.CURRENT) - .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) - .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); - - RoutingTable.Builder routing = new RoutingTable.Builder(); - routing.addAsNew(indexMetaData); - IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); - - String primaryNode = null; - String relocatingNode = null; - UnassignedInfo unassignedInfo = null; - if (primaryState != ShardRoutingState.UNASSIGNED) { - if (primaryLocal) { - primaryNode = newNode(0).id(); - unassignedNodes.remove(primaryNode); - } else { - primaryNode = selectAndRemove(unassignedNodes); - } - if (primaryState == ShardRoutingState.RELOCATING) { - relocatingNode = selectAndRemove(unassignedNodes); - } - } else { - unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); - } - indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, 0, unassignedInfo)); - - for (ShardRoutingState replicaState : replicaStates) { - String replicaNode = null; - relocatingNode = null; - unassignedInfo = null; - if (replicaState != ShardRoutingState.UNASSIGNED) { - assert primaryNode != null : "a replica is assigned but the primary isn't"; - replicaNode = selectAndRemove(unassignedNodes); - if (replicaState == ShardRoutingState.RELOCATING) { - relocatingNode = selectAndRemove(unassignedNodes); - } - } else { - unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); - } - indexShardRoutingBuilder.addShard( - TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, 0, unassignedInfo)); - } - - ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); - state.nodes(discoBuilder); - state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); - 
state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build()))); - return state.build(); - } - - private String selectAndRemove(Set<String> strings) { - String selection = randomFrom(strings.toArray(new String[strings.size()])); - strings.remove(selection); - return selection; - } - @Test public void testNotStartedPrimary() throws InterruptedException, ExecutionException { final String index = "test"; @@ -527,6 +433,7 @@ public class ShardReplicationTests extends ESTestCase { action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); final TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); Thread t = new Thread() { + @Override public void run() { primaryPhase.run(); } @@ -587,6 +494,7 @@ public class ShardReplicationTests extends ESTestCase { action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); Thread t = new Thread() { + @Override public void run() { try { replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel()); @@ -746,10 +654,6 @@ public class ShardReplicationTests extends ESTestCase { } } - static DiscoveryNode newNode(int nodeId) { - return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT); - } - /* * Throws exceptions when executed. Used for testing if the counter is correctly decremented in case an operation fails. * */ diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java index 62f4dc77060..ba6e6b6532d 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -346,8 +346,8 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase { assertNotNull(luceneTermEnum.next()); assertThat(esTermEnum.totalTermFreq(), equalTo(luceneTermEnum.totalTermFreq())); - PostingsEnum esDocsPosEnum = esTermEnum.postings(null, null, PostingsEnum.POSITIONS); - PostingsEnum luceneDocsPosEnum = luceneTermEnum.postings(null, null, PostingsEnum.POSITIONS); + PostingsEnum esDocsPosEnum = esTermEnum.postings(null, PostingsEnum.POSITIONS); + PostingsEnum luceneDocsPosEnum = luceneTermEnum.postings(null, PostingsEnum.POSITIONS); if (luceneDocsPosEnum == null) { // test we expect that... 
assertFalse(field.storedOffset); diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java index 0e1c978dda8..1d0c317f5ad 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java @@ -119,7 +119,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase { assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq())); } - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); assertThat(iterator.docFreq(), equalTo(numDocs)); @@ -176,7 +176,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase { assertThat("expected ttf of " + string, -1, equalTo((int) iterator.totalTermFreq())); - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); assertThat(iterator.docFreq(), equalTo(-1)); @@ -236,7 +236,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase { assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq())); } - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); assertThat(iterator.docFreq(), equalTo(numDocs)); diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 47031b828b3..6f046974633 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -335,7 +335,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { assertThat(infoString, next, notNullValue()); // do not test ttf or doc frequency, because here we have // many shards and do not know how documents are distributed - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); // docs and pos only returns something if positions or // payloads or offsets are stored / requested. Otherwise use // DocsEnum? 
@@ -464,7 +464,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { TermsEnum iterator = terms.iterator(); while (iterator.next() != null) { String term = iterator.term().utf8ToString(); - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); List curPayloads = payloads.get(term); assertThat(term, curPayloads, notNullValue()); @@ -658,7 +658,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { assertThat(next, notNullValue()); // do not test ttf or doc frequency, because here we have many // shards and do not know how documents are distributed - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); int[] termPos = pos[j]; @@ -753,8 +753,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { assertThat("term: " + string0, iter0.totalTermFreq(), equalTo(iter1.totalTermFreq())); // compare freq and docs - PostingsEnum docsAndPositions0 = iter0.postings(null, null, PostingsEnum.ALL); - PostingsEnum docsAndPositions1 = iter1.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions0 = iter0.postings(null, PostingsEnum.ALL); + PostingsEnum docsAndPositions1 = iter1.postings(null, PostingsEnum.ALL); assertThat("term: " + string0, docsAndPositions0.nextDoc(), equalTo(docsAndPositions1.nextDoc())); assertThat("term: " + string0, docsAndPositions0.freq(), equalTo(docsAndPositions1.freq())); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 83ae87580bb..a7b9043f350 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -106,6 +106,13 @@ public class BootstrapForTesting { if (Strings.hasLength(System.getProperty("tests.config"))) { perms.add(new FilePermission(System.getProperty("tests.config"), "read,readlink")); } + // jacoco coverage output file + if (Boolean.getBoolean("tests.coverage")) { + Path coverageDir = PathUtils.get(System.getProperty("tests.coverage.dir")); + perms.add(new FilePermission(coverageDir.resolve("jacoco.exec").toString(), "read,write")); + // in case we get fancy and use the -integration goals later: + perms.add(new FilePermission(coverageDir.resolve("jacoco-it.exec").toString(), "read,write")); + } Policy.setPolicy(new ESPolicy(perms)); System.setSecurityManager(new TestSecurityManager()); Security.selfTest(); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java new file mode 100644 index 00000000000..5423e68b555 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.test.ESTestCase; + +import java.io.FilePermission; +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.CodeSource; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.PrivilegedAction; +import java.security.ProtectionDomain; +import java.security.cert.Certificate; + +/** + * Tests for ESPolicy + *
<p>
+ * Most unit tests won't run under security manager, since we don't allow + * access to the policy (you cannot construct it) + */ +public class ESPolicyTests extends ESTestCase { + + /** + * Test policy with null codesource. + *
<p>
+ * This can happen when restricting privileges with doPrivileged, + * even though ProtectionDomain's ctor javadocs might make you think + * that the policy won't be consulted. + */ + public void testNullCodeSource() throws Exception { + assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); + PermissionCollection noPermissions = new Permissions(); + ESPolicy policy = new ESPolicy(noPermissions); + assertFalse(policy.implies(new ProtectionDomain(null, noPermissions), new FilePermission("foo", "read"))); + } + + /** + * test with null location + *
<p>
+ * it's unclear when/if this happens, see https://bugs.openjdk.java.net/browse/JDK-8129972 + */ + public void testNullLocation() throws Exception { + assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); + PermissionCollection noPermissions = new Permissions(); + ESPolicy policy = new ESPolicy(noPermissions); + assertFalse(policy.implies(new ProtectionDomain(new CodeSource(null, (Certificate[])null), noPermissions), new FilePermission("foo", "read"))); + } + + /** + * test restricting privileges to no permissions actually works + */ + public void testRestrictPrivileges() { + assumeTrue("test requires security manager", System.getSecurityManager() != null); + try { + System.getProperty("user.home"); + } catch (SecurityException e) { + fail("this test needs to be fixed: user.home not available by policy"); + } + + PermissionCollection noPermissions = new Permissions(); + AccessControlContext noPermissionsAcc = new AccessControlContext( + new ProtectionDomain[] { + new ProtectionDomain(null, noPermissions) + } + ); + try { + AccessController.doPrivileged(new PrivilegedAction<Void>() { + public Void run() { + System.getProperty("user.home"); + fail("access should have been denied"); + return null; + } + }, noPermissionsAcc); + } catch (SecurityException expected) { + // expected exception + } + } +} diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java index aec57c57816..4b38cf73919 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java @@ -112,6 +112,9 @@ public class BasicAnalysisBackwardCompatibilityIT extends ESBackcompatTestCase { if (preBuiltAnalyzers == PreBuiltAnalyzers.SORANI && compatibilityVersion().before(Version.V_1_3_0)) { continue; // SORANI was added in 1.3.0 } + if (preBuiltAnalyzers == PreBuiltAnalyzers.LITHUANIAN && compatibilityVersion().before(Version.V_2_1_0)) { + continue; // LITHUANIAN was added in 2.1.0 + } return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java index d26d0491095..a7e9380d02a 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.bwcompat; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -33,6 +32,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.test.ESBackcompatTestCase; import org.junit.Test; +import java.util.List; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -46,10 +47,10 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase { assertAcked(createIndexResponse); GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.ALIASES) .execute().actionGet(); - ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasesMap = 
getIndexResponse.aliases(); + ImmutableOpenMap<String, List<AliasMetaData>> aliasesMap = getIndexResponse.aliases(); assertThat(aliasesMap, notNullValue()); assertThat(aliasesMap.size(), equalTo(1)); - ImmutableList<AliasMetaData> aliasesList = aliasesMap.get("test"); + List<AliasMetaData> aliasesList = aliasesMap.get("test"); assertThat(aliasesList, notNullValue()); assertThat(aliasesList.size(), equalTo(1)); AliasMetaData alias = aliasesList.get(0); @@ -100,10 +101,10 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase { ensureSearchable("test"); GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.WARMERS) .execute().actionGet(); - ImmutableOpenMap<String, ImmutableList<Entry>> warmersMap = getIndexResponse.warmers(); + ImmutableOpenMap<String, List<Entry>> warmersMap = getIndexResponse.warmers(); assertThat(warmersMap, notNullValue()); assertThat(warmersMap.size(), equalTo(1)); - ImmutableList<Entry> warmersList = warmersMap.get("test"); + List<Entry> warmersList = warmersMap.get("test"); assertThat(warmersList, notNullValue()); assertThat(warmersList.size(), equalTo(1)); Entry warmer = warmersList.get(0); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index edd6254b511..1aa1602b7e5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; @@ -44,6 +43,7 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.Collections; import java.util.List; import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; @@ -659,14 +659,14 @@ public class ClusterStateDiffIT extends ESIntegTestCase { new SnapshotId(randomName("repo"), randomName("snap")), randomBoolean(), SnapshotsInProgress.State.fromValue((byte) randomIntBetween(0, 6)), - ImmutableList.of(), + Collections.emptyList(), Math.abs(randomLong()), ImmutableMap.of())); case 1: return new RestoreInProgress(new RestoreInProgress.Entry( new SnapshotId(randomName("repo"), randomName("snap")), RestoreInProgress.State.fromValue((byte) randomIntBetween(0, 3)), - ImmutableList.of(), + Collections.emptyList(), ImmutableMap.of())); default: throw new IllegalArgumentException("Shouldn't be here"); diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 84b31e3be30..1ec4c23adda 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -46,8 +46,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; +import java.util.List; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; @@ -100,7 +100,7 @@ public class AckIT extends ESIntegTestCase { for (Client client : clients()) { GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get(); 
assertThat(getWarmersResponse.warmers().size(), equalTo(1)); - ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); + ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); assertThat(entry.key, equalTo("test")); assertThat(entry.value.size(), equalTo(1)); assertThat(entry.value.get(0).name(), equalTo("custom_warmer")); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index a0b610c2bb2..fdc1c528a45 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -38,6 +37,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Test; +import java.util.Collections; import java.util.EnumSet; import static org.elasticsearch.cluster.routing.ShardRoutingState.*; @@ -251,7 +251,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { assertThat(clusterState.getRoutingNodes().hasUnassigned(), equalTo(false)); // fail shard ShardRouting shardToFail = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail", null)))).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail", null)))).build(); // verify the reason and details assertThat(clusterState.getRoutingNodes().hasUnassigned(), equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 78083a6b902..ff2ae1051ed 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -35,6 +34,7 @@ import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Test; import java.util.ArrayList; +import java.util.Collections; import static org.elasticsearch.cluster.routing.ShardRoutingState.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -562,7 +562,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); // start another replica shard, while keep one initializing - clusterState = 
ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, ImmutableList.of(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))).routingTable()).build(); + clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))).routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index 4e6eabc589c..d405fb1d42c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.structure; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -37,6 +36,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Test; +import java.util.Collections; + import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.*; @@ -46,28 +47,28 @@ public class RoutingIteratorTests extends ESAllocationTestCase { @Test public void testEmptyIterator() { ShardShuffler shuffler = new RotationShardShuffler(0); - ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), 
nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java index 70a2a2c6018..5511796a2ed 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; @@ -73,7 +72,7 @@ public class IndexCacheableQueryTests extends ESTestCase { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { return null; } @@ -104,10 +103,7 @@ public class IndexCacheableQueryTests extends ESTestCase { } IndexReader reader = writer.getReader(); - // IndexReader wrapping is disabled because of LUCENE-6500. - // Add it back when we are on 5.3 - assert Version.LATEST == Version.LUCENE_5_2_1; - IndexSearcher searcher = newSearcher(reader, false); + IndexSearcher searcher = newSearcher(reader); reader = searcher.getIndexReader(); // reader might be wrapped searcher.setQueryCache(cache); searcher.setQueryCachingPolicy(policy); @@ -123,10 +119,7 @@ public class IndexCacheableQueryTests extends ESTestCase { writer.addDocument(new Document()); IndexReader reader2 = writer.getReader(); - // IndexReader wrapping is disabled because of LUCENE-6500. - // Add it back when we are on 5.3 - assert Version.LATEST == Version.LUCENE_5_2_1; - searcher = newSearcher(reader2, false); + searcher = newSearcher(reader2); reader2 = searcher.getIndexReader(); // reader might be wrapped searcher.setQueryCache(cache); searcher.setQueryCachingPolicy(policy); diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index d277e3a240c..b85c42c902a 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; @@ -45,7 +44,7 @@ public class CollectionUtilsTests extends ESTestCase { @Test public void rotateEmpty() { - assertTrue(CollectionUtils.rotate(ImmutableList.of(), randomInt()).isEmpty()); + assertTrue(CollectionUtils.rotate(Collections.emptyList(), randomInt()).isEmpty()); } @Test diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java index d410c86890a..77fd17eec80 100644 --- a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java @@ -214,7 +214,7 @@ public class SimpleLuceneTests extends ESTestCase { TermsEnum termsEnum = terms.iterator(); termsEnum.next(); - PostingsEnum termDocs = termsEnum.postings(atomicReader.getLiveDocs(), null); + PostingsEnum termDocs = termsEnum.postings(null); 
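+ // Lucene 5.3 dropped the live-docs argument from TermsEnum.postings(); filtering deleted documents is now the caller's responsibility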
assertThat(termDocs.nextDoc(), equalTo(0)); assertThat(termDocs.docID(), equalTo(0)); assertThat(termDocs.freq(), equalTo(1)); @@ -222,7 +222,7 @@ public class SimpleLuceneTests extends ESTestCase { terms = atomicReader.terms("int2"); termsEnum = terms.iterator(); termsEnum.next(); - termDocs = termsEnum.postings(atomicReader.getLiveDocs(), termDocs); + termDocs = termsEnum.postings(termDocs); assertThat(termDocs.nextDoc(), equalTo(0)); assertThat(termDocs.docID(), equalTo(0)); assertThat(termDocs.freq(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 34f4a860e39..8f00bf4073f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -23,21 +23,29 @@ import com.google.common.base.Predicate; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.DjbHashFunction; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -48,6 +56,10 @@ import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.recovery.RecoverySource; +import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -55,7 +67,10 @@ import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; import org.elasticsearch.test.disruption.*; import org.elasticsearch.test.junit.annotations.TestLogging; import 
org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; import org.junit.Before; import org.junit.Test; @@ -259,8 +274,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { NetworkPartition networkPartition = addRandomPartition(); - final String isolatedNode = networkPartition.getMinoritySide().get(0); - final String nonIsolatedNode = networkPartition.getMajoritySide().get(0); + assertEquals(1, networkPartition.getMinoritySide().size()); + final String isolatedNode = networkPartition.getMinoritySide().iterator().next(); + assertEquals(2, networkPartition.getMajoritySide().size()); + final String nonIsolatedNode = networkPartition.getMajoritySide().iterator().next(); // Simulate a network issue between the unlucky node and the rest of the cluster. networkPartition.startDisrupting(); @@ -337,7 +354,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { NetworkPartition networkPartition = addRandomIsolation(isolatedNode); networkPartition.startDisrupting(); - String nonIsolatedNode = networkPartition.getMajoritySide().get(0); + String nonIsolatedNode = networkPartition.getMajoritySide().iterator().next(); // make sure cluster reforms ensureStableCluster(2, nonIsolatedNode); @@ -810,7 +827,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } - /** Test cluster join with issues in cluster state publishing * */ + /** + * Test cluster join with issues in cluster state publishing + */ @Test public void testClusterJoinDespiteOfPublishingIssues() throws Exception { List<String> nodes = startCluster(2, 1); @@ -917,6 +936,277 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureStableCluster(3); } + /* + * Tests a visibility issue if a shard is in POST_RECOVERY + * + * When a user indexes a document, then refreshes and then executes a search, and all of these succeed without timeouts, then + * the document must be visible for the search. + * + * When a primary is relocating from node_1 to node_2, there can be a short time where both old and new primary + * are started and accept indexing and read requests. However, the new primary might not be visible to nodes + * that lag behind one cluster state. If such a node then sends a refresh to the index, this refresh request + * must reach the new primary on node_2 too. Otherwise a different node that searches on the new primary might not + * find the indexed document although a refresh was executed before. + * + * In detail: + * Cluster state 0: + * node_1: [index][0] STARTED (ShardRoutingState) + * node_2: no shard + * + * 0. primary ([index][0]) relocates from node_1 to node_2 + * Cluster state 1: + * node_1: [index][0] RELOCATING (ShardRoutingState), (STARTED from IndexShardState perspective on node_1) + * node_2: [index][0] INITIALIZING (ShardRoutingState), (IndexShardState on node_2 is RECOVERING) + * + * 1. 
node_2 is done recovering, moves its shard to IndexShardState.POST_RECOVERY and sends a message to master that the shard is ShardRoutingState.STARTED + * Cluster state is still the same but the IndexShardState on node_2 has changed and it now accepts writes and reads: + * node_1: [index][0] RELOCATING (ShardRoutingState), (STARTED from IndexShardState perspective on node_1) + * node_2: [index][0] INITIALIZING (ShardRoutingState), (IndexShardState on node_2 is POST_RECOVERY) + * + * 2. any node receives an index request which is then executed on node_1 and node_2 + * + * 3. node_3 sends a refresh but it is a little behind with cluster state processing and still on cluster state 0. + * Since refresh is a broadcast operation, node_3 sends it to node_1 only, because it does not know that node_2 has a shard too + * + * 4. node_3 catches up with the cluster state and acks it to the master, which now can process the shard started message + * that node_2 sent before, and updates the cluster state to: + * Cluster state 2: + * node_1: [index][0] no shard + * node_2: [index][0] STARTED (ShardRoutingState), (IndexShardState on node_2 is still POST_RECOVERY) + * + * master sends this to all nodes. + * + * 5. node_4 and node_3 process cluster state 2, but node_1 and node_2 have not processed it yet + * + * If node_4 now searches for the document that was indexed before, it will search on node_2 because it is on + * cluster state 2. It should be able to retrieve the document because the refresh from before was + * successful. + */ + @Test + public void testReadOnPostRecoveryShards() throws Exception { + List<BlockClusterStateProcessing> clusterStateBlocks = new ArrayList<>(); + try { + configureUnicastCluster(5, null, 1); + // we could probably write a test without a dedicated master node but it is easier if we use one + Future<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync(); + // node_1 will have the shard in the beginning + Future<String> node1Future = internalCluster().startDataOnlyNodeAsync(); + final String masterNode = masterNodeFuture.get(); + final String node_1 = node1Future.get(); + logger.info("--> creating index [test] with one shard and zero replicas"); + assertAcked(prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexShard.INDEX_REFRESH_INTERVAL, -1)) + .addMapping("doc", jsonBuilder().startObject().startObject("doc") + .startObject("properties").startObject("text").field("type", "string").endObject().endObject() + .endObject().endObject()) + ); + ensureGreen("test"); + logger.info("--> starting three more data nodes"); + List<String> nodeNamesFuture = internalCluster().startDataOnlyNodesAsync(3).get(); + final String node_2 = nodeNamesFuture.get(0); + final String node_3 = nodeNamesFuture.get(1); + final String node_4 = nodeNamesFuture.get(2); + logger.info("--> running cluster_health"); + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForNodes("5") + .setWaitForRelocatingShards(0) + .get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + logger.info("--> move shard from node_1 to node_2, and wait for relocation to finish"); + + // block cluster state updates on node_3 so that it only sees the shard on node_1 + BlockClusterStateProcessing disruptionNode3 = new BlockClusterStateProcessing(node_3, getRandom()); + clusterStateBlocks.add(disruptionNode3); + internalCluster().setDisruptionScheme(disruptionNode3); + disruptionNode3.startDisrupting(); + // register a Tracer 
that signals the start and the end of the relocation + MockTransportService transportServiceNode2 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_2); + CountDownLatch beginRelocationLatchNode2 = new CountDownLatch(1); + CountDownLatch endRelocationLatchNode2 = new CountDownLatch(1); + transportServiceNode2.addTracer(new StartRecoveryToShardStartedTracer(logger, beginRelocationLatchNode2, endRelocationLatchNode2)); + + // block cluster state updates on node_1 and node_2 so that we end up with two primaries + BlockClusterStateProcessing disruptionNode2 = new BlockClusterStateProcessing(node_2, getRandom()); + clusterStateBlocks.add(disruptionNode2); + disruptionNode2.applyToCluster(internalCluster()); + BlockClusterStateProcessing disruptionNode1 = new BlockClusterStateProcessing(node_1, getRandom()); + clusterStateBlocks.add(disruptionNode1); + disruptionNode1.applyToCluster(internalCluster()); + + logger.info("--> move shard from node_1 to node_2"); + // don't block on the relocation. cluster state updates are blocked on node_3 and the relocation would time out + Future<ClusterRerouteResponse> rerouteFuture = internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2)).setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).execute(); + + logger.info("--> wait for relocation to start"); + // wait for relocation to start + beginRelocationLatchNode2.await(); + // start to block cluster state updates on node_1 and node_2 so that we end up with two primaries, + // one STARTED on node_1 and one in POST_RECOVERY on node_2 + disruptionNode1.startDisrupting(); + disruptionNode2.startDisrupting(); + endRelocationLatchNode2.await(); + final Client node3Client = internalCluster().client(node_3); + final Client node2Client = internalCluster().client(node_2); + final Client node1Client = internalCluster().client(node_1); + final Client node4Client = internalCluster().client(node_4); + logger.info("--> index doc"); + logLocalClusterStates(node1Client, node2Client, node3Client, node4Client); + assertTrue(node3Client.prepareIndex("test", "doc").setSource("{\"text\":\"a\"}").get().isCreated()); + // sometimes refresh and sometimes flush + int refreshOrFlushType = randomIntBetween(1, 2); + switch (refreshOrFlushType) { + case 1: { + logger.info("--> refresh from node_3"); + RefreshResponse refreshResponse = node3Client.admin().indices().prepareRefresh().get(); + assertThat(refreshResponse.getFailedShards(), equalTo(0)); + // total shards would normally be num replicas + 1 (here: 1) but the relocating shard + // is counted twice, once on the source and once on the target node, so we expect 2 + assertThat(refreshResponse.getTotalShards(), equalTo(2)); + assertThat(refreshResponse.getSuccessfulShards(), equalTo(2)); + break; + } + case 2: { + logger.info("--> flush from node_3"); + FlushResponse flushResponse = node3Client.admin().indices().prepareFlush().get(); + assertThat(flushResponse.getFailedShards(), equalTo(0)); + // total shards would normally be num replicas + 1 (here: 1) but the relocating shard + // is counted twice, once on the source and once on the target node, so we expect 2 + assertThat(flushResponse.getTotalShards(), equalTo(2)); + assertThat(flushResponse.getSuccessfulShards(), equalTo(2)); + break; + } + default: + fail("this is a test bug, number should be between 1 and 2"); + } + // now stop disrupting so that node_3 can ack the last cluster state to the master and the master can continue + // to publish the next cluster state + logger.info("--> stop disrupting node_3"); + disruptionNode3.stopDisrupting(); + 
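+            // with node_3 unblocked, the master can publish the pending cluster state, so the reroute request issued above is now expected to complete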
rerouteFuture.get(); + logger.info("--> wait for node_4 to get new cluster state"); + // wait until node_4 actually has the new cluster state in which node_1 has no shard + assertBusy(new Runnable() { + @Override + public void run() { + ClusterState clusterState = node4Client.admin().cluster().prepareState().setLocal(true).get().getState(); + // get the node id from the name. TODO: Is there a better way to do this? + String nodeId = null; + for (RoutingNode node : clusterState.getRoutingNodes()) { + if (node.node().name().equals(node_1)) { + nodeId = node.nodeId(); + } + } + assertNotNull(nodeId); + // check that node_1 does not have the shard in local cluster state + assertFalse(clusterState.getRoutingNodes().routingNodeIter(nodeId).hasNext()); + } + }); + + logger.info("--> run count from node_4"); + logLocalClusterStates(node1Client, node2Client, node3Client, node4Client); + CountResponse countResponse = node4Client.prepareCount("test").setPreference("local").get(); + assertThat(countResponse.getCount(), equalTo(1L)); + logger.info("--> stop disrupting node_1 and node_2"); + disruptionNode2.stopDisrupting(); + disruptionNode1.stopDisrupting(); + // wait for relocation to finish + logger.info("--> wait for relocation to finish"); + clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForRelocatingShards(0) + .get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + } catch (AssertionError e) { + for (BlockClusterStateProcessing blockClusterStateProcessing : clusterStateBlocks) { + blockClusterStateProcessing.stopDisrupting(); + } + throw e; + } + } + + /** + * This Tracer can be used to signal the start of a recovery and the shard started event after the translog was copied + */ + public static class StartRecoveryToShardStartedTracer extends MockTransportService.Tracer { + private final ESLogger logger; + private final CountDownLatch beginRelocationLatch; + private final CountDownLatch sentShardStartedLatch; + + public StartRecoveryToShardStartedTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch sentShardStartedLatch) { + this.logger = logger; + this.beginRelocationLatch = beginRelocationLatch; + this.sentShardStartedLatch = sentShardStartedLatch; + } + + @Override + public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { + if (action.equals(RecoverySource.Actions.START_RECOVERY)) { + logger.info("sent: {}, relocation starts", action); + beginRelocationLatch.countDown(); + } + if (action.equals(ShardStateAction.SHARD_STARTED_ACTION_NAME)) { + logger.info("sent: {}, shard started", action); + sentShardStartedLatch.countDown(); + } + } + } + + private void logLocalClusterStates(Client... clients) { + int counter = 1; + for (Client client : clients) { + ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState(); + logger.info("--> cluster state on node_{} {}", counter, clusterState.prettyPrint()); + counter++; + } + } + + /** + * This test creates a scenario where a primary shard (0 replicas) relocates and is in POST_RECOVERY on the target + * node but already deleted on the source node. Search requests should still work. 
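+     * Blocking cluster state processing on the target node keeps its shard in POST_RECOVERY while the source node processes the new cluster state and deletes its copy.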
+ */ + @Test + public void searchWithRelocationAndSlowClusterStateProcessing() throws Exception { + configureUnicastCluster(3, null, 1); + Future<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync(); + Future<String> node_1Future = internalCluster().startDataOnlyNodeAsync(); + + final String node_1 = node_1Future.get(); + final String masterNode = masterNodeFuture.get(); + logger.info("--> creating index [test] with one shard and zero replicas"); + assertAcked(prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + ensureGreen("test"); + + Future<String> node_2Future = internalCluster().startDataOnlyNodeAsync(); + final String node_2 = node_2Future.get(); + List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("doc").setSource("{\"int_field\":1}")); + } + indexRandom(true, indexRequestBuilderList); + SingleNodeDisruption disruption = new BlockClusterStateProcessing(node_2, getRandom()); + + internalCluster().setDisruptionScheme(disruption); + MockTransportService transportServiceNode2 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_2); + CountDownLatch beginRelocationLatch = new CountDownLatch(1); + CountDownLatch endRelocationLatch = new CountDownLatch(1); + transportServiceNode2.addTracer(new IndicesStoreIntegrationIT.ReclocationStartEndTracer(logger, beginRelocationLatch, endRelocationLatch)); + internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2)).get(); + // wait for relocation to start + beginRelocationLatch.await(); + disruption.startDisrupting(); + // wait for relocation to finish + endRelocationLatch.await(); + // now search for the documents and see if we get a reply + assertThat(client().prepareCount().get().getCount(), equalTo(100L)); + } + @Test public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { // test for https://github.com/elastic/elasticsearch/issues/8823 @@ -930,6 +1220,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen(); internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { + @Override public boolean clearData(String nodeName) { return true; } diff --git a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java index a846bc2cc9b..88499bf96cd 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java @@ -23,12 +23,70 @@ import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; +import java.util.*; public class PriorityComparatorTests extends ESTestCase { + public void testPreferNewIndices() { + RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); + List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, 
null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); + Collections.shuffle(shardRoutings, random()); + for (ShardRouting routing : shardRoutings) { + shards.add(routing); + } + shards.sort(new PriorityComparator() { + @Override + protected Settings getIndexSettings(String index) { + if ("oldest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10) + .put(IndexMetaData.SETTING_PRIORITY, 1).build(); + } else if ("newest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100) + .put(IndexMetaData.SETTING_PRIORITY, 1).build(); + } + return Settings.EMPTY; + } + }); + RoutingNodes.UnassignedShards.UnassignedIterator iterator = shards.iterator(); + ShardRouting next = iterator.next(); + assertEquals("newest", next.index()); + next = iterator.next(); + assertEquals("oldest", next.index()); + assertFalse(iterator.hasNext()); + } + + public void testPreferPriorityIndices() { + RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); + List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); + Collections.shuffle(shardRoutings, random()); + for (ShardRouting routing : shardRoutings) { + shards.add(routing); + } + shards.sort(new PriorityComparator() { + @Override + protected Settings getIndexSettings(String index) { + if ("oldest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10) + .put(IndexMetaData.SETTING_PRIORITY, 100).build(); + } else if ("newest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100) + .put(IndexMetaData.SETTING_PRIORITY, 1).build(); + } + return Settings.EMPTY; + } + }); + RoutingNodes.UnassignedShards.UnassignedIterator iterator = shards.iterator(); + ShardRouting next = iterator.next(); + assertEquals("oldest", next.index()); + next = iterator.next(); + assertEquals("newest", next.index()); + assertFalse(iterator.hasNext()); + } + public void testPriorityComparatorSort() { RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); int numIndices = randomIntBetween(3, 99); diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java index fc967189482..e45f1c469b0 100644 --- a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.codecs.lucene49.Lucene49Codec; import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -51,7 +52,8 @@ public class CodecTests extends ESSingleNodeTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = 
createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene50Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene53Codec.class)); + assertThat(codecService.codec("Lucene50"), instanceOf(Lucene50Codec.class)); assertThat(codecService.codec("Lucene410"), instanceOf(Lucene410Codec.class)); assertThat(codecService.codec("Lucene49"), instanceOf(Lucene49Codec.class)); assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class)); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index f3d45a8061c..deebc4511c0 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -116,7 +116,6 @@ public class InternalEngineTests extends ESTestCase { protected InternalEngine replicaEngine; private Settings defaultSettings; - private int indexConcurrency; private String codecName; private Path primaryTranslogDir; private Path replicaTranslogDir; @@ -127,7 +126,6 @@ public class InternalEngineTests extends ESTestCase { super.setUp(); CodecService codecService = new CodecService(shardId.index()); - indexConcurrency = randomIntBetween(1, 20); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { // some codecs are read only so we only take the ones that we have in the service and randomly @@ -140,7 +138,6 @@ public class InternalEngineTests extends ESTestCase { .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean()) .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING, codecName) - .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); // TODO randomize more settings threadPool = new ThreadPool(getClass().getName()); @@ -1507,8 +1504,6 @@ public class InternalEngineTests extends ESTestCase { assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(engine.config().getIndexConcurrency(), indexConcurrency); - assertEquals(currentIndexWriterConfig.getMaxThreadStates(), indexConcurrency); } @Test diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 7b45a3b90cd..5d431c5d9e6 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -91,7 +91,6 @@ public class ShadowEngineTests extends ESTestCase { protected Engine replicaEngine; private Settings defaultSettings; - private int indexConcurrency; private String codecName; private Path dirPath; @@ -100,7 +99,6 @@ public class ShadowEngineTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); CodecService codecService = new CodecService(shardId.index()); - indexConcurrency = randomIntBetween(1, 20); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { // some codecs are read only so we only take the ones that we have in the service 
and randomly @@ -113,7 +111,6 @@ public class ShadowEngineTests extends ESTestCase { .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean()) .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING, codecName) - .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); // TODO randomize more settings threadPool = new ThreadPool(getClass().getName()); @@ -921,7 +918,6 @@ public class ShadowEngineTests extends ESTestCase { public void testSettings() { CodecService codecService = new CodecService(shardId.index()); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(replicaEngine.config().getIndexConcurrency(), indexConcurrency); } @Test diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java index 7a1aad21824..488aca2a34e 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java @@ -59,7 +59,7 @@ public class ParentChildFilteredTermsEnumTests extends ESTestCase { for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) { ++expected; assertThat(term.utf8ToString(), equalTo(format(expected))); - PostingsEnum docsEnum = termsEnum.postings(null, null); + PostingsEnum docsEnum = termsEnum.postings(null); assertThat(docsEnum, notNullValue()); int docId = docsEnum.nextDoc(); assertThat(docId, not(equalTo(-1))); @@ -98,7 +98,7 @@ public class ParentChildFilteredTermsEnumTests extends ESTestCase { for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) { ++expected; assertThat(term.utf8ToString(), equalTo(format(expected))); - PostingsEnum docsEnum = termsEnum.postings(null, null); + PostingsEnum docsEnum = termsEnum.postings(null); assertThat(docsEnum, notNullValue()); int numDocs = 0; for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index 5ea01ee244e..41a44d4a88c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -182,14 +182,14 @@ public class FieldTypeLookupTests extends ESTestCase { lookup.checkCompatibility(newList(f3), false); fail("expected conflict"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("has different store values")); + assertTrue(e.getMessage().contains("has different [store] values")); } // even with updateAllTypes == true, incompatible try { lookup.checkCompatibility(newList(f3), true); fail("expected conflict"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("has different store values")); + assertTrue(e.getMessage().contains("has different [store] values")); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index a27a4344c56..a45348d530c 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -18,57 +18,197 @@ */ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.lucene.Lucene; +import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; /** Base test case for subclasses of MappedFieldType */ public abstract class FieldTypeTestCase extends ESTestCase { + /** Abstraction for mutating a property of a MappedFieldType */ + public static abstract class Modifier { + /** The name of the property that is being modified. Used in test failure messages. */ + public final String property; + /** true if this modifier only makes types incompatible in strict mode, false otherwise */ + public final boolean strictOnly; + /** true if reversing the order of checkCompatibility arguments should result in the same conflicts, false otherwise */ + public final boolean symmetric; + + public Modifier(String property, boolean strictOnly, boolean symmetric) { + this.property = property; + this.strictOnly = strictOnly; + this.symmetric = symmetric; + } + + /** Modifies the property */ + public abstract void modify(MappedFieldType ft); + /** + * Optional method to implement that allows the field type that will be compared to be modified, + * so that it does not have the default value for the property being modified. + */ + public void normalizeOther(MappedFieldType other) {} + } + + private final List<Modifier> modifiers = new ArrayList<>(Arrays.asList( + new Modifier("boost", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setBoost(1.1f); + } + }, + new Modifier("doc_values", false, false) { + @Override + public void modify(MappedFieldType ft) { + ft.setHasDocValues(ft.hasDocValues() == false); + } + }, + new Modifier("analyzer", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setIndexAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + }, + new Modifier("analyzer", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setIndexAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setIndexAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + } + }, + new Modifier("search_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + }, + new Modifier("search_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSearchAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + } + }, + new Modifier("search_quote_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + }, + new Modifier("search_quote_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", new 
StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSearchQuoteAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + } + }, + new Modifier("similarity", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY)); + } + }, + new Modifier("similarity", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY)); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY)); + } + }, + new Modifier("norms.loading", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setNormsLoading(MappedFieldType.Loading.LAZY); + } + }, + new Modifier("fielddata", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setFieldDataType(new FieldDataType("foo", Settings.builder().put("loading", "eager").build())); + } + }, + new Modifier("null_value", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setNullValue(dummyNullValue); + } + } + )); + + /** + * Add a mutation that will be tested for all expected semantics of equality and compatibility. + * These should be added in an @Before method. + */ + protected void addModifier(Modifier modifier) { + modifiers.add(modifier); + } + + private Object dummyNullValue = "dummyvalue"; + + /** Sets the null value used by the modifier for null value testing. This should be set in an @Before method. */ + protected void setDummyNullValue(Object value) { + dummyNullValue = value; + } + /** Create a default constructed fieldtype */ protected abstract MappedFieldType createDefaultFieldType(); - MappedFieldType createNamedDefaultFieldType(String name) { + MappedFieldType createNamedDefaultFieldType() { MappedFieldType fieldType = createDefaultFieldType(); - fieldType.setNames(new MappedFieldType.Names(name)); + fieldType.setNames(new MappedFieldType.Names("foo")); return fieldType; } - /** A dummy null value to use when modifying null value */ - protected Object dummyNullValue() { - return "dummyvalue"; - } - - /** Returns the number of properties that can be modified for the fieldtype */ - protected int numProperties() { - return 10; - } - - /** Modifies a property, identified by propNum, on the given fieldtype */ - protected void modifyProperty(MappedFieldType ft, int propNum) { - switch (propNum) { - case 0: ft.setNames(new MappedFieldType.Names("dummy")); break; - case 1: ft.setBoost(1.1f); break; - case 2: ft.setHasDocValues(!ft.hasDocValues()); break; - case 3: ft.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); break; - case 4: ft.setSearchAnalyzer(Lucene.STANDARD_ANALYZER); break; - case 5: ft.setSearchQuoteAnalyzer(Lucene.STANDARD_ANALYZER); break; - case 6: ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY)); break; - case 7: ft.setNormsLoading(MappedFieldType.Loading.LAZY); break; - case 8: ft.setFieldDataType(new FieldDataType("foo", Settings.builder().put("loading", "eager").build())); break; - case 9: ft.setNullValue(dummyNullValue()); break; - default: fail("unknown fieldtype property number " + propNum); + // TODO: remove this once toString is no longer final on FieldType... 
+ protected void assertFieldTypeEquals(String property, MappedFieldType ft1, MappedFieldType ft2) { + if (ft1.equals(ft2) == false) { + fail("Expected equality, testing property " + property + "\nexpected: " + toString(ft1) + "; \nactual: " + toString(ft2) + "\n"); } } - // TODO: remove this once toString is no longer final on FieldType... - protected void assertEquals(int i, MappedFieldType ft1, MappedFieldType ft2) { - assertEquals("prop " + i + "\nexpected: " + toString(ft1) + "; \nactual: " + toString(ft2), ft1, ft2); + protected void assertFieldTypeNotEquals(String property, MappedFieldType ft1, MappedFieldType ft2) { + if (ft1.equals(ft2)) { + fail("Expected inequality, testing property " + property + "\nfirst: " + toString(ft1) + "; \nsecond: " + toString(ft2) + "\n"); + } + } + + protected void assertCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict) { + List<String> conflicts = new ArrayList<>(); + ft1.checkCompatibility(ft2, conflicts, strict); + assertTrue("Found conflicts for " + msg + ": " + conflicts, conflicts.isEmpty()); + } + + protected void assertNotCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict, String... messages) { + assert messages.length != 0; + List<String> conflicts = new ArrayList<>(); + ft1.checkCompatibility(ft2, conflicts, strict); + for (String message : messages) { + boolean found = false; + for (String conflict : conflicts) { + if (conflict.contains(message)) { + found = true; + } + } + assertTrue("Missing conflict for " + msg + ": [" + message + "] in conflicts " + conflicts, found); + } } protected String toString(MappedFieldType ft) { @@ -88,45 +228,50 @@ public abstract class FieldTypeTestCase extends ESTestCase { } public void testClone() { - MappedFieldType fieldType = createNamedDefaultFieldType("foo"); + MappedFieldType fieldType = createNamedDefaultFieldType(); MappedFieldType clone = fieldType.clone(); assertNotSame(clone, fieldType); assertEquals(clone.getClass(), fieldType.getClass()); assertEquals(clone, fieldType); assertEquals(clone, clone.clone()); // transitivity - for (int i = 0; i < numProperties(); ++i) { - fieldType = createNamedDefaultFieldType("foo"); - modifyProperty(fieldType, i); + for (Modifier modifier : modifiers) { + fieldType = createNamedDefaultFieldType(); + modifier.modify(fieldType); clone = fieldType.clone(); assertNotSame(clone, fieldType); - assertEquals(i, clone, fieldType); + assertFieldTypeEquals(modifier.property, clone, fieldType); } } public void testEquals() { - MappedFieldType ft1 = createNamedDefaultFieldType("foo"); - MappedFieldType ft2 = createNamedDefaultFieldType("foo"); + MappedFieldType ft1 = createNamedDefaultFieldType(); + MappedFieldType ft2 = createNamedDefaultFieldType(); assertEquals(ft1, ft1); // reflexive assertEquals(ft1, ft2); // symmetric assertEquals(ft2, ft1); assertEquals(ft1.hashCode(), ft2.hashCode()); - for (int i = 0; i < numProperties(); ++i) { - ft2 = createNamedDefaultFieldType("foo"); - modifyProperty(ft2, i); - assertNotEquals(ft1, ft2); - assertNotEquals(ft1.hashCode(), ft2.hashCode()); + for (Modifier modifier : modifiers) { + ft1 = createNamedDefaultFieldType(); + ft2 = createNamedDefaultFieldType(); + modifier.modify(ft2); + assertFieldTypeNotEquals(modifier.property, ft1, ft2); + assertNotEquals("hash code for modified property " + modifier.property, ft1.hashCode(), ft2.hashCode()); + // modify the same property and they are equal again + modifier.modify(ft1); + assertFieldTypeEquals(modifier.property, ft1, ft2); + 
assertEquals("hash code for modified property " + modifier.property, ft1.hashCode(), ft2.hashCode()); } } public void testFreeze() { - for (int i = 0; i < numProperties(); ++i) { - MappedFieldType fieldType = createNamedDefaultFieldType("foo"); + for (Modifier modifier : modifiers) { + MappedFieldType fieldType = createNamedDefaultFieldType(); fieldType.freeze(); try { - modifyProperty(fieldType, i); - fail("expected already frozen exception for property " + i); + modifier.modify(fieldType); + fail("expected already frozen exception for property " + modifier.property); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("already frozen")); } @@ -134,7 +279,7 @@ public abstract class FieldTypeTestCase extends ESTestCase { } public void testCheckTypeName() { - final MappedFieldType fieldType = createNamedDefaultFieldType("foo"); + final MappedFieldType fieldType = createNamedDefaultFieldType(); List conflicts = new ArrayList<>(); fieldType.checkTypeName(fieldType, conflicts); assertTrue(conflicts.toString(), conflicts.isEmpty()); @@ -164,4 +309,46 @@ public abstract class FieldTypeTestCase extends ESTestCase { assertTrue(conflicts.get(0).contains("cannot be changed from type")); assertEquals(1, conflicts.size()); } + + public void testCheckCompatibility() { + MappedFieldType ft1 = createNamedDefaultFieldType(); + MappedFieldType ft2 = createNamedDefaultFieldType(); + assertCompatible("default", ft1, ft2, true); + assertCompatible("default", ft1, ft2, false); + assertCompatible("default", ft2, ft1, true); + assertCompatible("default", ft2, ft1, false); + + for (Modifier modifier : modifiers) { + ft1 = createNamedDefaultFieldType(); + ft2 = createNamedDefaultFieldType(); + modifier.normalizeOther(ft1); + modifier.modify(ft2); + if (modifier.strictOnly) { + String[] conflicts = { + "mapper [foo] is used by multiple types", + "update [" + modifier.property + "]" + }; + assertCompatible(modifier.property, ft1, ft2, false); + assertNotCompatible(modifier.property, ft1, ft2, true, conflicts); + assertCompatible(modifier.property, ft2, ft1, false); // always symmetric when not strict + if (modifier.symmetric) { + assertNotCompatible(modifier.property, ft2, ft1, true, conflicts); + } else { + assertCompatible(modifier.property, ft2, ft1, true); + } + } else { + // not compatible whether strict or not + String conflict = "different [" + modifier.property + "]"; + assertNotCompatible(modifier.property, ft1, ft2, true, conflict); + assertNotCompatible(modifier.property, ft1, ft2, false, conflict); + if (modifier.symmetric) { + assertNotCompatible(modifier.property, ft2, ft1, true, conflict); + assertNotCompatible(modifier.property, ft2, ft1, false, conflict); + } else { + assertCompatible(modifier.property, ft2, ft1, true); + assertCompatible(modifier.property, ft2, ft1, false); + } + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java index 47403c8b1df..f241d555e12 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class BinaryFieldTypeTests extends FieldTypeTestCase { @@ -28,17 +29,14 @@ public class 
BinaryFieldTypeTests extends FieldTypeTestCase { return new BinaryFieldMapper.BinaryFieldType(); } - @Override - protected int numProperties() { - return 1 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - BinaryFieldMapper.BinaryFieldType bft = (BinaryFieldMapper.BinaryFieldType)ft; - switch (propNum) { - case 0: bft.setTryUncompressing(!bft.tryUncompressing()); break; - default: super.modifyProperty(ft, propNum - 1); - } + @Before + public void setupProperties() { + addModifier(new Modifier("try_uncompressing", false, true) { + @Override + public void modify(MappedFieldType ft) { + BinaryFieldMapper.BinaryFieldType bft = (BinaryFieldMapper.BinaryFieldType)ft; + bft.setTryUncompressing(!bft.tryUncompressing()); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java index 3485a383cd6..0800e4ae3d9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class BooleanFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class BooleanFieldTypeTests extends FieldTypeTestCase { return new BooleanFieldMapper.BooleanFieldType(); } - @Override - protected Object dummyNullValue() { - return true; + @Before + public void setupProperties() { + setDummyNullValue(true); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java index 33bf21e2710..08697ccd364 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class ByteFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class ByteFieldTypeTests extends FieldTypeTestCase { return new ByteFieldMapper.ByteFieldType(); } - @Override - protected Object dummyNullValue() { - return (byte)10; + @Before + public void setupProperties() { + setDummyNullValue((byte)10); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java index b5afc1ae672..55dd7f8f7c9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java @@ -20,10 +20,53 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.suggest.completion.AnalyzingCompletionLookupProvider; +import org.elasticsearch.search.suggest.context.ContextBuilder; +import org.elasticsearch.search.suggest.context.ContextMapping; +import org.junit.Before; + +import java.util.SortedMap; +import java.util.TreeMap; public 
class CompletionFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { - return new CompletionFieldMapper.CompletionFieldType(); + CompletionFieldMapper.CompletionFieldType ft = new CompletionFieldMapper.CompletionFieldType(); + ft.setProvider(new AnalyzingCompletionLookupProvider(true, false, true, false)); + return ft; + } + + @Before + public void setupProperties() { + addModifier(new Modifier("preserve_separators", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + cft.setProvider(new AnalyzingCompletionLookupProvider(false, false, true, false)); + } + }); + addModifier(new Modifier("preserve_position_increments", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + cft.setProvider(new AnalyzingCompletionLookupProvider(true, false, false, false)); + } + }); + addModifier(new Modifier("payload", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + cft.setProvider(new AnalyzingCompletionLookupProvider(true, false, true, true)); + } + }); + addModifier(new Modifier("context_mapping", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + SortedMap<String, ContextMapping> contextMapping = new TreeMap<>(); + contextMapping.put("foo", ContextBuilder.location("foo").build()); + cft.setContextMapping(contextMapping); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java index b6e162c8c38..3c37af6f49a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -31,23 +32,26 @@ public class DateFieldTypeTests extends FieldTypeTestCase { return new DateFieldMapper.DateFieldType(); } - @Override - protected Object dummyNullValue() { - return 10; - } - - @Override - protected int numProperties() { - return 2 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - DateFieldMapper.DateFieldType dft = (DateFieldMapper.DateFieldType)ft; - switch (propNum) { - case 0: dft.setDateTimeFormatter(Joda.forPattern("basic_week_date", Locale.ROOT)); break; - case 1: dft.setTimeUnit(TimeUnit.HOURS); break; - default: super.modifyProperty(ft, propNum - 2); - } + @Before + public void setupProperties() { + setDummyNullValue(10); + addModifier(new Modifier("format", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((DateFieldMapper.DateFieldType) ft).setDateTimeFormatter(Joda.forPattern("basic_week_date", Locale.ROOT)); + } + }); + addModifier(new Modifier("locale", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((DateFieldMapper.DateFieldType) 
ft).setDateTimeFormatter(Joda.forPattern("date_optional_time", Locale.CANADA)); + } + }); + addModifier(new Modifier("numeric_resolution", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((DateFieldMapper.DateFieldType)ft).setTimeUnit(TimeUnit.HOURS); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java index 247f5e2364d..5f34e746ece 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class DoubleFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class DoubleFieldTypeTests extends FieldTypeTestCase { return new DoubleFieldMapper.DoubleFieldType(); } - @Override - protected Object dummyNullValue() { - return 10.0D; + @Before + public void setupProperties() { + setDummyNullValue(10.0D); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java index 934f656ae76..73d593ac2f6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class FloatFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class FloatFieldTypeTests extends FieldTypeTestCase { return new DoubleFieldMapper.DoubleFieldType(); } - @Override - protected Object dummyNullValue() { - return 10.0; + @Before + public void setupProperties() { + setDummyNullValue(10.0); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java index 6e4a49e758b..b8c40af72c5 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class IntegerFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class IntegerFieldTypeTests extends FieldTypeTestCase { return new IntegerFieldMapper.IntegerFieldType(); } - @Override - protected Object dummyNullValue() { - return 10; + @Before + public void setupProperties() { + setDummyNullValue(10); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java index 671483e8978..e7b41bf21d1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import 
org.elasticsearch.index.mapper.FieldTypeTestCase;
 import org.elasticsearch.index.mapper.MappedFieldType;
+import org.junit.Before;
 public class LongFieldTypeTests extends FieldTypeTestCase {
     @Override
@@ -27,8 +28,8 @@ public class LongFieldTypeTests extends FieldTypeTestCase {
         return new LongFieldMapper.LongFieldType();
     }
-    @Override
-    protected Object dummyNullValue() {
-        return (long)10;
+    @Before
+    public void setupProperties() {
+        setDummyNullValue((long)10);
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java
index 8a10da5cac2..12ddf827f43 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java
@@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core;
 import org.elasticsearch.index.mapper.FieldTypeTestCase;
 import org.elasticsearch.index.mapper.MappedFieldType;
+import org.junit.Before;
 public class ShortFieldTypeTests extends FieldTypeTestCase {
     @Override
@@ -27,8 +28,8 @@ public class ShortFieldTypeTests extends FieldTypeTestCase {
         return new ShortFieldMapper.ShortFieldType();
     }
-    @Override
-    protected Object dummyNullValue() {
-        return (short)10;
+    @Before
+    public void setupProperties() {
+        setDummyNullValue((short)10);
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java
index 837a7a060c1..613cdde97e6 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java
@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.core;
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
@@ -36,6 +35,7 @@ import org.junit.Test;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -98,8 +98,8 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase {
     public void facetByTokenCount() throws IOException {
         init();
-        String facetField = randomFrom(ImmutableList.of(
-                "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
+        String facetField = randomFrom(Arrays.asList(
+                "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
         SearchResponse result = searchByNumericRange(1, 10)
                 .addAggregation(AggregationBuilders.terms("facet").field(facetField)).get();
         assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
@@ -166,7 +166,7 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase {
     private SearchRequestBuilder searchByNumericRange(int low, int high) {
         return prepareSearch().setQuery(QueryBuilders.rangeQuery(randomFrom(
-                ImmutableList.of("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")
+                Arrays.asList("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")
         )).gte(low).lte(high));
     }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
index 1ca1c3a2837..4d61ecf397e 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
@@ -626,9 +626,10 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
         MergeResult mergeResult = stage1.merge(stage2.mapping(), false, false);
         assertThat(mergeResult.hasConflicts(), equalTo(true));
-        assertThat(mergeResult.buildConflicts().length, equalTo(1));
+        assertThat(mergeResult.buildConflicts().length, equalTo(2));
         // todo better way of checking conflict?
-        assertThat("mapper [point] has different lat_lon", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()))));
+        assertThat("mapper [point] has different [lat_lon]", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()))));
+        assertThat("mapper [point] has different [ignore_malformed]", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()))));
         // correct mapping and ensure no failures
         stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java
index 07a769faa61..b6ebeb90acf 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java
@@ -22,6 +22,7 @@ import org.elasticsearch.index.mapper.FieldTypeTestCase;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
 import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.junit.Before;
 public class GeoPointFieldTypeTests extends FieldTypeTestCase {
     @Override
@@ -29,20 +30,33 @@ public class GeoPointFieldTypeTests extends FieldTypeTestCase {
         return new GeoPointFieldMapper.GeoPointFieldType();
     }
-    @Override
-    protected int numProperties() {
-        return 4 + super.numProperties();
-    }
-
-    @Override
-    protected void modifyProperty(MappedFieldType ft, int propNum) {
-        GeoPointFieldMapper.GeoPointFieldType gft = (GeoPointFieldMapper.GeoPointFieldType)ft;
-        switch (propNum) {
-            case 0: gft.setGeohashEnabled(new StringFieldMapper.StringFieldType(), 1, true); break;
-            case 1: gft.setLatLonEnabled(new DoubleFieldMapper.DoubleFieldType(), new DoubleFieldMapper.DoubleFieldType()); break;
-            case 2: gft.setIgnoreMalformed(!gft.ignoreMalformed()); break;
-            case 3: gft.setCoerce(!gft.coerce()); break;
-            default: super.modifyProperty(ft, propNum - 4);
-        }
+    @Before
+    public void setupProperties() {
+        addModifier(new Modifier("geohash", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoPointFieldMapper.GeoPointFieldType)ft).setGeohashEnabled(new StringFieldMapper.StringFieldType(), 1, true);
+            }
+        });
+        addModifier(new Modifier("lat_lon", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoPointFieldMapper.GeoPointFieldType)ft).setLatLonEnabled(new DoubleFieldMapper.DoubleFieldType(), new DoubleFieldMapper.DoubleFieldType());
+            }
+        });
+        addModifier(new Modifier("ignore_malformed", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                GeoPointFieldMapper.GeoPointFieldType gft = (GeoPointFieldMapper.GeoPointFieldType)ft;
+                gft.setIgnoreMalformed(!gft.ignoreMalformed());
+            }
+        });
+        addModifier(new Modifier("coerce", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                GeoPointFieldMapper.GeoPointFieldType gft = (GeoPointFieldMapper.GeoPointFieldType)ft;
+                gft.setCoerce(!gft.coerce());
+            }
+        });
     }
 }
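
The field-type test diffs above replace the positional `numProperties()`/`modifyProperty(int)` overrides with named `Modifier` objects registered from a JUnit `@Before` method. `FieldTypeTestCase` itself is not part of this patch, so the sketch below is only a guess at the contract these call sites target; the `(String, boolean, boolean)` constructor shape and the `modify()` hook are visible in the diff, but the names and meanings of the two boolean flags are assumptions. Naming each modifier after its mapping property is presumably what lets the base class assert the bracketed `mapper [x] has different [property]` conflict messages that the expected strings elsewhere in this patch were updated to.

---------------------------------------------------------------------------
// Hypothetical sketch of the FieldTypeTestCase.Modifier contract used by the
// setupProperties() methods above; flag names are guesses, not the real API.
public abstract static class Modifier {
    public final String property;     // property name echoed as "has different [property]"
    public final boolean updateable;  // assumed: may a mapping update change this property?
    public final boolean checkClone;  // assumed: include the property in clone/equality checks

    public Modifier(String property, boolean updateable, boolean checkClone) {
        this.property = property;
        this.updateable = updateable;
        this.checkClone = checkClone;
    }

    /** Flip or set exactly the one property this modifier describes. */
    public abstract void modify(MappedFieldType ft);
}
---------------------------------------------------------------------------
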
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
index b30589b8c93..b7161c3463b 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
@@ -377,10 +377,10 @@ public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase {
         assertThat(mergeResult.hasConflicts(), equalTo(true));
         assertThat(mergeResult.buildConflicts().length, equalTo(4));
         ArrayList<String> conflicts = new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()));
-        assertThat("mapper [shape] has different strategy", isIn(conflicts));
-        assertThat("mapper [shape] has different tree", isIn(conflicts));
-        assertThat("mapper [shape] has different tree_levels", isIn(conflicts));
-        assertThat("mapper [shape] has different precision", isIn(conflicts));
+        assertThat("mapper [shape] has different [strategy]", isIn(conflicts));
+        assertThat("mapper [shape] has different [tree]", isIn(conflicts));
+        assertThat("mapper [shape] has different [tree_levels]", isIn(conflicts));
+        assertThat("mapper [shape] has different [precision]", isIn(conflicts));
         // verify nothing changed
         FieldMapper fieldMapper = stage1.mappers().getMapper("shape");
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java
index 527d79c0ee9..7ce99aa737a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java
@@ -21,31 +21,51 @@ package org.elasticsearch.index.mapper.geo;
 import org.elasticsearch.common.geo.builders.ShapeBuilder;
 import org.elasticsearch.index.mapper.FieldTypeTestCase;
 import org.elasticsearch.index.mapper.MappedFieldType;
+import org.junit.Before;
 public class GeoShapeFieldTypeTests extends FieldTypeTestCase {
     @Override
     protected MappedFieldType createDefaultFieldType() {
-        GeoShapeFieldMapper.GeoShapeFieldType gft = new GeoShapeFieldMapper.GeoShapeFieldType();
-        gft.setNames(new MappedFieldType.Names("testgeoshape"));
-        return gft;
+        return new GeoShapeFieldMapper.GeoShapeFieldType();
     }
-    @Override
-    protected int numProperties() {
-        return 6 + super.numProperties();
-    }
-
-    @Override
-    protected void modifyProperty(MappedFieldType ft, int propNum) {
-        GeoShapeFieldMapper.GeoShapeFieldType gft = (GeoShapeFieldMapper.GeoShapeFieldType)ft;
-        switch (propNum) {
-            case 0: gft.setTree("quadtree"); break;
-            case 1: gft.setStrategyName("term"); break;
-            case 2: gft.setTreeLevels(10); break;
-            case 3: gft.setPrecisionInMeters(20); break;
-            case 4: gft.setDefaultDistanceErrorPct(0.5); break;
-            case 5: gft.setOrientation(ShapeBuilder.Orientation.LEFT); break;
-            default: super.modifyProperty(ft, propNum - 6);
-        }
+    @Before
+    public void setupProperties() {
+        addModifier(new Modifier("tree", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setTree("quadtree");
+            }
+        });
+        addModifier(new Modifier("strategy", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setStrategyName("term");
+            }
+        });
+        addModifier(new Modifier("tree_levels", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setTreeLevels(10);
+            }
+        });
+        addModifier(new Modifier("precision", false, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setPrecisionInMeters(20);
+            }
+        });
+        addModifier(new Modifier("distance_error_pct", true, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setDefaultDistanceErrorPct(0.5);
+            }
+        });
+        addModifier(new Modifier("orientation", true, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setOrientation(ShapeBuilder.Orientation.LEFT);
+            }
+        });
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java
index 8f0ea33d37e..83aa779a61d 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java
@@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.internal;
 import org.elasticsearch.index.mapper.FieldTypeTestCase;
 import org.elasticsearch.index.mapper.MappedFieldType;
+import org.junit.Before;
 public class FieldNamesFieldTypeTests extends FieldTypeTestCase {
     @Override
@@ -27,17 +28,14 @@ public class FieldNamesFieldTypeTests extends FieldTypeTestCase {
         return new FieldNamesFieldMapper.FieldNamesFieldType();
     }
-    @Override
-    protected int numProperties() {
-        return 1 + super.numProperties();
-    }
-
-    @Override
-    protected void modifyProperty(MappedFieldType ft, int propNum) {
-        FieldNamesFieldMapper.FieldNamesFieldType fnft = (FieldNamesFieldMapper.FieldNamesFieldType)ft;
-        switch (propNum) {
-            case 0: fnft.setEnabled(!fnft.isEnabled()); break;
-            default: super.modifyProperty(ft, propNum - 1);
-        }
+    @Before
+    public void setupProperties() {
+        addModifier(new Modifier("enabled", true, true) {
+            @Override
+            public void modify(MappedFieldType ft) {
+                FieldNamesFieldMapper.FieldNamesFieldType fnft = (FieldNamesFieldMapper.FieldNamesFieldType)ft;
+                fnft.setEnabled(!fnft.isEnabled());
+            }
+        });
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
index 655069ed9e2..eec0002a6ef 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
@@ -173,15 +173,15 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase {
         DocumentMapper docMapper4 = parser.parse(mapping);
         mergeResult = docMapper.merge(docMapper4.mapping(), true, false);
         assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true));
-        assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values"));
-        assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values"));
+        assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different [index] values"));
+        assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different [store] values"));
         mergeResult = docMapper.merge(docMapper4.mapping(), false, false);
         assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true));
         assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
-        assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values"));
-        assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values"));
+        assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different [index] values"));
+        assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different [store] values"));
         // There are conflicts, but the `name.not_indexed3` has been added, b/c that field has no conflicts
         assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
index f122de90202..e6b08fb4978 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
@@ -515,7 +515,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase {
         mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), true, false);
         assertTrue(mergeResult.hasConflicts());
         assertEquals(1, mergeResult.buildConflicts().length);
-        assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms"));
+        assertTrue(mergeResult.buildConflicts()[0].contains("different [omit_norms]"));
     }
     /**
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java
index 61a9c3ac9bd..c0e957e522f 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java
@@ -19,8 +19,6 @@
 package org.elasticsearch.index.mapper.string;
-import com.google.common.collect.ImmutableList;
-
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -29,6 +27,7 @@ import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import java.io.IOException;
+import java.util.Arrays;
 import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
@@ -140,7 +139,7 @@ public class StringFieldMapperPositionIncrementGapTests extends ESSingleNodeTest
     }
     private static void testGap(Client client, String indexName, String type, int positionIncrementGap) throws IOException {
-        client.prepareIndex(indexName, type, "position_gap_test").setSource("string", ImmutableList.of("one", "two three")).setRefresh(true).get();
+        client.prepareIndex(indexName, type, "position_gap_test").setSource("string", Arrays.asList("one", "two three")).setRefresh(true).get();
         // Baseline - phrase query finds matches in the same field value
         assertHitCount(client.prepareSearch(indexName).setQuery(matchPhraseQuery("string", "two three")).get(), 1);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
index e33f898427d..057dc41f0f9 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
@@ -579,11 +579,10 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
         MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false);
         List<String> expectedConflicts = new ArrayList<>(Arrays.asList(
-            "mapper [_timestamp] has different index values",
-            "mapper [_timestamp] has different store values",
+            "mapper [_timestamp] has different [index] values",
+            "mapper [_timestamp] has different [store] values",
             "Cannot update default in _timestamp value. Value is 1970-01-01 now encountering 1970-01-02",
-            "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar",
-            "mapper [_timestamp] has different tokenize values"));
+            "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar"));
         for (String conflict : mergeResult.buildConflicts()) {
             assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict));
@@ -618,12 +617,12 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
         MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false);
         List<String> expectedConflicts = new ArrayList<>();
-        expectedConflicts.add("mapper [_timestamp] has different index values");
-        expectedConflicts.add("mapper [_timestamp] has different tokenize values");
+        expectedConflicts.add("mapper [_timestamp] has different [index] values");
+        expectedConflicts.add("mapper [_timestamp] has different [tokenize] values");
         if (indexValues.get(0).equals("not_analyzed") == false) {
             // if the only index value left is not_analyzed, then the doc values setting will be the same, but in the
             // other two cases, it will change
-            expectedConflicts.add("mapper [_timestamp] has different doc_values values");
+            expectedConflicts.add("mapper [_timestamp] has different [doc_values] values");
         }
         for (String conflict : mergeResult.buildConflicts()) {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java
index 4d4d06bd292..4ae039a3610 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java
@@ -55,14 +55,14 @@ public class UpdateMappingOnClusterIT extends ESIntegTestCase {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_create_index.json");
         String mappingUpdate = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json");
         String[] errorMessage = {"[_all] enabled is true now encountering false",
-                "[_all] cannot enable norms (`norms.enabled`)",
-                "[_all] has different store values",
-                "[_all] has different store_term_vector values",
-                "[_all] has different store_term_vector_offsets values",
-                "[_all] has different store_term_vector_positions values",
-                "[_all] has different store_term_vector_payloads values",
-                "[_all] has different analyzer",
-                "[_all] has different similarity"};
+                "[_all] has different [omit_norms] values",
+                "[_all] has different [store] values",
+                "[_all] has different [store_term_vector] values",
+                "[_all] has different [store_term_vector_offsets] values",
+                "[_all] has different [store_term_vector_positions] values",
+                "[_all] has different [store_term_vector_payloads] values",
+                "[_all] has different [analyzer]",
+                "[_all] has different [similarity]"};
         // fielddata and search_analyzer should not report conflict
         testConflict(mapping, mappingUpdate, errorMessage);
     }
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
index 2577de5f1c9..95b3bca7694 100644
--- a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
@@ -28,6 +28,7 @@ import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.common.lease.Releasables;
@@ -258,8 +259,14 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTestCase {
             for (String id : parentIds) {
                 TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));
                 if (seekStatus == TermsEnum.SeekStatus.FOUND) {
-                    docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
-                    expectedResult.set(docsEnum.nextDoc());
+                    docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE);
+                    final Bits liveDocs = slowLeafReader.getLiveDocs();
+                    for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+                        if (liveDocs == null || liveDocs.get(doc)) {
+                            break;
+                        }
+                    }
+                    expectedResult.set(docsEnum.docID());
                 } else if (seekStatus == TermsEnum.SeekStatus.END) {
                     break;
                 }
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
index 60a98ffbe6b..d8d09fe0b9c 100644
--- a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
@@ -31,6 +31,7 @@ import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.common.lease.Releasables;
@@ -231,8 +232,14 @@ public class ChildrenQueryTests extends AbstractChildTestCase {
                 if (count >= minChildren && (maxChildren == 0 || count <= maxChildren)) {
                     TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", entry.getKey()));
                     if (seekStatus == TermsEnum.SeekStatus.FOUND) {
-                        docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
-                        expectedResult.set(docsEnum.nextDoc());
+                        docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE);
+                        final Bits liveDocs = slowLeafReader.getLiveDocs();
+                        for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+                            if (liveDocs == null || liveDocs.get(doc)) {
+                                break;
+                            }
+                        }
+                        expectedResult.set(docsEnum.docID());
                         scores[docsEnum.docID()] = new FloatArrayList(entry.getValue());
                     } else if (seekStatus == TermsEnum.SeekStatus.END) {
                         break;
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
index 55047d85bb5..71eb8214d1d 100644
--- a/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
@@ -28,6 +28,7 @@ import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.common.lease.Releasables;
@@ -209,8 +210,14 @@ public class ParentConstantScoreQueryTests extends AbstractChildTestCase {
             for (String id : childIds) {
                 TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id));
                 if (seekStatus == TermsEnum.SeekStatus.FOUND) {
-                    docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
-                    expectedResult.set(docsEnum.nextDoc());
+                    docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE);
+                    final Bits liveDocs = slowLeafReader.getLiveDocs();
+                    for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+                        if (liveDocs == null || liveDocs.get(doc)) {
+                            break;
+                        }
+                    }
+                    expectedResult.set(docsEnum.docID());
                 } else if (seekStatus == TermsEnum.SeekStatus.END) {
                     break;
                 }
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
index 3fd638473ce..57dd8af9efd 100644
--- a/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
@@ -29,6 +29,7 @@ import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.common.lease.Releasables;
@@ -207,8 +208,14 @@ public class ParentQueryTests extends AbstractChildTestCase {
             for (Map.Entry<String, FloatArrayList> entry : childIdsAndScore.entrySet()) {
                 TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", entry.getKey()));
                 if (seekStatus == TermsEnum.SeekStatus.FOUND) {
-                    docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
-                    expectedResult.set(docsEnum.nextDoc());
+                    docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE);
+                    final Bits liveDocs = slowLeafReader.getLiveDocs();
+                    for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+                        if (liveDocs == null || liveDocs.get(doc)) {
+                            break;
+                        }
+                    }
+                    expectedResult.set(docsEnum.docID());
                     FloatArrayList s = scores[docsEnum.docID()];
                     if (s == null) {
                         scores[docsEnum.docID()] = s = new FloatArrayList(2);
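
The four hunks above make the same mechanical change: Lucene's `TermsEnum.postings(...)` no longer accepts a live-docs `Bits` argument, so callers that previously had deleted documents filtered out for free now have to skip them by hand. A minimal sketch of the resulting pattern, extracted from the hunks; `termsEnum` and `leafReader` are placeholders for whatever enum and reader the caller already holds:

---------------------------------------------------------------------------
// Filtering deleted docs manually now that postings() takes only (reuse, flags).
PostingsEnum postings = termsEnum.postings(null, PostingsEnum.NONE);
Bits liveDocs = leafReader.getLiveDocs(); // null when the segment has no deletions
int doc;
while ((doc = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    if (liveDocs == null || liveDocs.get(doc)) {
        break; // first live document that contains the term
    }
}
---------------------------------------------------------------------------
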
diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTest.java b/core/src/test/java/org/elasticsearch/index/store/StoreTest.java
index d5f929e12ce..5757764d250 100644
--- a/core/src/test/java/org/elasticsearch/index/store/StoreTest.java
+++ b/core/src/test/java/org/elasticsearch/index/store/StoreTest.java
@@ -22,8 +22,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FilterCodec;
 import org.apache.lucene.codecs.SegmentInfoFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat;
+import org.apache.lucene.codecs.lucene53.Lucene53Codec;
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.*;
 import org.apache.lucene.store.*;
@@ -181,7 +181,7 @@ public class StoreTest extends ESTestCase {
     private static final class OldSIMockingCodec extends FilterCodec {
         protected OldSIMockingCodec() {
-            super(new Lucene50Codec().getName(), new Lucene50Codec());
+            super(new Lucene53Codec().getName(), new Lucene53Codec());
         }
         @Override
@@ -239,6 +239,10 @@ public class StoreTest extends ESTestCase {
     }
     // IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    @AwaitsFix(bugUrl="Fails with seed E1394B038144F6E")
+    // The test currently fails because the segment infos and the index don't
+    // agree on the oldest version of a segment. We should fix this test by
+    // switching to a static bw index
     @Test
     public void testWriteLegacyChecksums() throws IOException {
         final ShardId shardId = new ShardId(new Index("index"), 1);
@@ -754,7 +758,6 @@ public class StoreTest extends ESTestCase {
         IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
         iwc.setMergePolicy(NoMergePolicy.INSTANCE);
         iwc.setUseCompoundFile(random.nextBoolean());
-        iwc.setMaxThreadStates(1);
         final ShardId shardId = new ShardId(new Index("index"), 1);
         DirectoryService directoryService = new LuceneManagedDirectoryService(random);
         Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
@@ -785,7 +788,6 @@ public class StoreTest extends ESTestCase {
         IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
         iwc.setMergePolicy(NoMergePolicy.INSTANCE);
         iwc.setUseCompoundFile(random.nextBoolean());
-        iwc.setMaxThreadStates(1);
         final ShardId shardId = new ShardId(new Index("index"), 1);
         DirectoryService directoryService = new LuceneManagedDirectoryService(random);
         store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
@@ -826,7 +828,6 @@ public class StoreTest extends ESTestCase {
         IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
         iwc.setMergePolicy(NoMergePolicy.INSTANCE);
         iwc.setUseCompoundFile(random.nextBoolean());
-        iwc.setMaxThreadStates(1);
         iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
         IndexWriter writer = new IndexWriter(store.directory(), iwc);
         writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
@@ -862,7 +863,6 @@ public class StoreTest extends ESTestCase {
         iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
         iwc.setMergePolicy(NoMergePolicy.INSTANCE);
         iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files
-        iwc.setMaxThreadStates(1);
         iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
         writer = new IndexWriter(store.directory(), iwc);
         writer.addDocument(docs.get(0));
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
index a287bcb4f54..62f9a84d5f9 100644
--- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
@@ -432,12 +432,12 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
      * state processing when a recover starts and only unblocking it shortly after the node receives
      * the ShardActiveRequest.
      */
-    static class ReclocationStartEndTracer extends MockTransportService.Tracer {
+    public static class ReclocationStartEndTracer extends MockTransportService.Tracer {
         private final ESLogger logger;
         private final CountDownLatch beginRelocationLatch;
         private final CountDownLatch receivedShardExistsRequestLatch;
-        ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
+        public ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
             this.logger = logger;
             this.beginRelocationLatch = beginRelocationLatch;
             this.receivedShardExistsRequestLatch = receivedShardExistsRequestLatch;
diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java
index e6afa4c259c..0ee4ab6329f 100644
--- a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java
@@ -21,7 +21,6 @@ package org.elasticsearch.indices.warmer;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.warmer.IndexWarmersMetaData;
@@ -30,6 +29,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.junit.Test;
 import java.util.Arrays;
+import java.util.List;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
 import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK;
@@ -106,7 +106,7 @@ public class IndicesWarmerBlocksIT extends ESIntegTestCase {
             GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get();
             assertThat(response.warmers().size(), equalTo(1));
-            ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next();
+            ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next();
             assertThat(entry.key, equalTo("test-blocks"));
             assertThat(entry.value.size(), equalTo(1));
             assertThat(entry.value.iterator().next().name(), equalTo("warmer_block"));
diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java
index 3a00be0aeb9..82e08588b58 100644
--- a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java
@@ -20,32 +20,22 @@ package org.elasticsearch.indices.warmer;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import com.google.common.collect.ImmutableList;
-import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
-import org.elasticsearch.action.admin.indices.segments.IndexSegments;
-import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
-import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
-import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
 import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
 import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
-import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
-import org.elasticsearch.index.engine.Segment;
-import org.elasticsearch.index.mapper.MappedFieldType.Loading;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.warmer.IndexWarmerMissingException;
 import org.elasticsearch.search.warmer.IndexWarmersMetaData;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;
 import org.junit.Test;
-import java.util.Locale;
+import java.util.List;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
@@ -199,7 +189,7 @@ public class SimpleIndicesWarmerIT extends ESIntegTestCase {
         GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
         assertThat(getWarmersResponse.warmers().size(), equalTo(1));
-        ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+        ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
         assertThat(entry.key, equalTo("test"));
         assertThat(entry.value.size(), equalTo(1));
         assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
@@ -267,94 +257,6 @@ public class SimpleIndicesWarmerIT extends ESIntegTestCase {
         return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total();
     }
-    private long getSegmentsMemoryUsage(String idx) {
-        IndicesSegmentResponse response = client().admin().indices().segments(Requests.indicesSegmentsRequest(idx)).actionGet();
-        IndexSegments indicesSegments = response.getIndices().get(idx);
-        long total = 0;
-        for (IndexShardSegments indexShardSegments : indicesSegments) {
-            for (ShardSegments shardSegments : indexShardSegments) {
-                for (Segment segment : shardSegments) {
-                    logger.debug("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getShardRouting().getIndex());
-                    total += segment.memoryInBytes;
-                }
-            }
-        }
-        return total;
-    }
-
-    private enum LoadingMethod {
-        LAZY {
-            @Override
-            CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) {
-                return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE));
-            }
-        },
-        EAGER {
-            @Override
-            CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) {
-                return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.EAGER_VALUE));
-            }
-
-            @Override
-            boolean isLazy() {
-                return false;
-            }
-        },
-        EAGER_PER_FIELD {
-            @Override
-            CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) throws Exception {
-                return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).addMapping(type, JsonXContent.contentBuilder()
-                        .startObject()
-                            .startObject(type)
-                                .startObject("properties")
-                                    .startObject(fieldName)
-                                        .field("type", "string")
-                                        .startObject("norms")
-                                            .field("loading", Loading.EAGER_VALUE)
-                                        .endObject()
-                                    .endObject()
-                                .endObject()
-                            .endObject()
-                        .endObject()
-                );
-            }
-
-            @Override
-            boolean isLazy() {
-                return false;
-            }
-        };
-        private static Settings SINGLE_SHARD_NO_REPLICA = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
-
-        abstract CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) throws Exception;
-
-        boolean isLazy() {
-            return true;
-        }
-    }
-
-    // NOTE: we have to ensure we defeat compression strategies of the default codec...
-    public void testEagerLoading() throws Exception {
-        for (LoadingMethod method : LoadingMethod.values()) {
-            logger.debug("METHOD " + method);
-            String indexName = method.name().toLowerCase(Locale.ROOT);
-            assertAcked(method.createIndex(indexName, "t", "foo"));
-            // index a doc with 1 token, and one with 3 tokens so we dont get CONST compressed (otherwise norms take zero memory usage)
-            client().prepareIndex(indexName, "t", "1").setSource("foo", "bar").execute().actionGet();
-            client().prepareIndex(indexName, "t", "2").setSource("foo", "bar baz foo").setRefresh(true).execute().actionGet();
-            ensureGreen(indexName);
-            long memoryUsage0 = getSegmentsMemoryUsage(indexName);
-            // queries load norms if they were not loaded before
-            client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("foo", "bar")).execute().actionGet();
-            long memoryUsage1 = getSegmentsMemoryUsage(indexName);
-            if (method.isLazy()) {
-                assertThat(memoryUsage1, greaterThan(memoryUsage0));
-            } else {
-                assertThat(memoryUsage1, equalTo(memoryUsage0));
-            }
-        }
-    }
-
     public void testQueryCacheOnWarmer() {
         createIndex("test");
         ensureGreen();
fail("script compilation should have been rejected"); } catch(ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + lang + "] are disabled")); @@ -69,34 +72,35 @@ public class CustomScriptContextIT extends ESIntegTestCase { try { scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, "expression", null), new ScriptContext.Plugin( - PLUGIN_NAME, "custom_exp_disabled_op")); + PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders); fail("script compilation should have been rejected"); } catch(ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [expression] are disabled")); } CompiledScript compiledScript = scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, "expression", null), - randomFrom(new ScriptContext[] {ScriptContext.Standard.AGGS, ScriptContext.Standard.SEARCH})); + randomFrom(new ScriptContext[] { ScriptContext.Standard.AGGS, ScriptContext.Standard.SEARCH }), contextAndHeaders); assertThat(compiledScript, notNullValue()); compiledScript = scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, "mustache", null), - new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); + new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders); assertThat(compiledScript, notNullValue()); for (String lang : LANG_SET) { compiledScript = scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, lang, null), new ScriptContext.Plugin( - PLUGIN_NAME, "custom_op")); + PLUGIN_NAME, "custom_op"), contextAndHeaders); assertThat(compiledScript, notNullValue()); } } @Test public void testCompileNonRegisteredPluginContext() { + ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); ScriptService scriptService = internalCluster().getInstance(ScriptService.class); try { scriptService.compile( new Script("test", randomFrom(ScriptService.ScriptType.values()), randomFrom(LANG_SET.toArray(new String[LANG_SET - .size()])), null), new ScriptContext.Plugin("test", "unknown")); + .size()])), null), new ScriptContext.Plugin("test", "unknown"), contextAndHeaders); fail("script compilation should have been rejected"); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [test_unknown] not supported")); @@ -105,6 +109,7 @@ public class CustomScriptContextIT extends ESIntegTestCase { @Test public void testCompileNonRegisteredScriptContext() { + ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); ScriptService scriptService = internalCluster().getInstance(ScriptService.class); try { scriptService.compile( @@ -114,7 +119,7 @@ public class CustomScriptContextIT extends ESIntegTestCase { public String getKey() { return "test"; } - }); + }, contextAndHeaders); fail("script compilation should have been rejected"); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [test] not supported")); @@ -133,9 +138,9 @@ public class CustomScriptContextIT extends ESIntegTestCase { } public void onModule(ScriptModule scriptModule) { - scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_op")); - scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); - scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, 
"custom_globally_disabled_op")); + scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_op")); + scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); + scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op")); + } } - } } diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index e0a7d4ba3cd..e97b97f2c89 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableSet; +import org.elasticsearch.common.ContextAndHeaderHolder; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.ModulesBuilder; @@ -47,6 +48,7 @@ public class NativeScriptTests extends ESTestCase { @Test public void testNativeScript() throws InterruptedException { + ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); Settings settings = Settings.settingsBuilder() .put("name", "testNativeScript") .put("path.home", createTempDir()) @@ -62,13 +64,14 @@ public class NativeScriptTests extends ESTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); ExecutableScript executable = scriptService.executable(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), - ScriptContext.Standard.SEARCH); + ScriptContext.Standard.SEARCH, contextAndHeaders); assertThat(executable.run().toString(), equalTo("test")); terminate(injector.getInstance(ThreadPool.class)); } @Test public void testFineGrainedSettingsDontAffectNativeScripts() throws IOException { + ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); Settings.Builder builder = Settings.settingsBuilder(); if (randomBoolean()) { ScriptType scriptType = randomFrom(ScriptType.values()); @@ -87,8 +90,8 @@ public class NativeScriptTests extends ESTestCase { ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry); for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { - assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext), - notNullValue()); + assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext, + contextAndHeaders), notNullValue()); } } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 658a2100886..ff51f186949 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -21,6 +21,8 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; +import org.elasticsearch.common.ContextAndHeaderHolder; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; @@ -106,7 +108,7 @@ public class ScriptServiceTests extends ESTestCase { Environment environment = new Environment(finalSettings); scriptService = new 
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
index 658a2100886..ff51f186949 100644
--- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
@@ -21,6 +21,8 @@ package org.elasticsearch.script;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
+import org.elasticsearch.common.ContextAndHeaderHolder;
+import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.settings.Settings;
@@ -106,7 +108,7 @@ public class ScriptServiceTests extends ESTestCase {
         Environment environment = new Environment(finalSettings);
         scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) {
             @Override
-            String getScriptFromIndex(String scriptLang, String id) {
+            String getScriptFromIndex(String scriptLang, String id, HasContextAndHeaders headersContext) {
                 //mock the script that gets retrieved from an index
                 return "100";
             }
@@ -125,6 +127,8 @@
     @Test
     public void testScriptsWithoutExtensions() throws IOException {
+
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         logger.info("--> setup two test files one with extension and another without");
         Path testFileNoExt = scriptsFilePath.resolve("test_no_ext");
@@ -135,7 +139,7 @@
         logger.info("--> verify that file with extension was correctly processed");
         CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null),
-                ScriptContext.Standard.SEARCH);
+                ScriptContext.Standard.SEARCH, contextAndHeaders);
         assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file"));
         logger.info("--> delete both files");
@@ -145,7 +149,8 @@
         logger.info("--> verify that file with extension was correctly removed");
         try {
-            scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH);
+            scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH,
+                    contextAndHeaders);
             fail("the script test_script should no longer exist");
         } catch (IllegalArgumentException ex) {
             assertThat(ex.getMessage(), containsString("Unable to find on disk file script [test_script] using lang [test]"));
@@ -154,49 +159,56 @@
     @Test
     public void testScriptsSameNameDifferentLanguage() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         createFileScripts("groovy", "expression");
         CompiledScript groovyScript = scriptService.compile(
-                new Script("file_script", ScriptType.FILE, GroovyScriptEngineService.NAME, null), randomFrom(scriptContexts));
+                new Script("file_script", ScriptType.FILE, GroovyScriptEngineService.NAME, null), randomFrom(scriptContexts),
+                contextAndHeaders);
         assertThat(groovyScript.lang(), equalTo(GroovyScriptEngineService.NAME));
         CompiledScript expressionScript = scriptService.compile(new Script("file_script", ScriptType.FILE, ExpressionScriptEngineService.NAME,
-                null), randomFrom(new ScriptContext[] {ScriptContext.Standard.AGGS, ScriptContext.Standard.SEARCH}));
+                null), randomFrom(new ScriptContext[] { ScriptContext.Standard.AGGS,
+                        ScriptContext.Standard.SEARCH }), contextAndHeaders);
         assertThat(expressionScript.lang(), equalTo(ExpressionScriptEngineService.NAME));
     }
     @Test
     public void testInlineScriptCompiledOnceCache() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled()));
     }
     @Test
     public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         CompiledScript compiledScript1 = scriptService.compile(new Script("script", ScriptType.INLINE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled()));
     }
     @Test
     public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         createFileScripts("test");
         CompiledScript compiledScript1 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled()));
     }
     @Test
     public void testDefaultBehaviourFineGrainedSettings() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         Settings.Builder builder = Settings.builder();
         //rarely inject the default settings, which have no effect
         if (rarely()) {
@@ -213,29 +225,30 @@
         for (ScriptContext scriptContext : scriptContexts) {
             //groovy is not sandboxed, only file scripts are enabled by default
-            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
-            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
-            assertCompileAccepted(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+            assertCompileAccepted(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
             //expression engine is sandboxed, all scripts are enabled by default
             if (!scriptContext.getKey().equals(ScriptContext.Standard.MAPPING.getKey()) &&
                     !scriptContext.getKey().equals(ScriptContext.Standard.UPDATE.getKey())) {
-                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
-                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
-                assertCompileAccepted(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+                assertCompileAccepted(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
             }
             //mustache engine is sandboxed, all scripts are enabled by default
-            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
-            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
-            assertCompileAccepted(MustacheScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+            assertCompileAccepted(MustacheScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
             //custom engine is sandboxed, all scripts are enabled by default
-            assertCompileAccepted("test", "script", ScriptType.INLINE, scriptContext);
-            assertCompileAccepted("test", "script", ScriptType.INDEXED, scriptContext);
-            assertCompileAccepted("test", "file_script", ScriptType.FILE, scriptContext);
+            assertCompileAccepted("test", "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+            assertCompileAccepted("test", "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+            assertCompileAccepted("test", "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
         }
     }
     @Test
     public void testFineGrainedSettings() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         //collect the fine-grained settings to set for this run
         int numScriptSettings = randomIntBetween(0, ScriptType.values().length);
         Map<ScriptType, ScriptMode> scriptSourceSettings = new HashMap<>();
@@ -345,16 +358,16 @@ for (String lang : scriptEngineService.types()) {
                         switch (scriptMode) {
                             case ON:
-                                assertCompileAccepted(lang, script, scriptType, scriptContext);
+                                assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 break;
                             case OFF:
-                                assertCompileRejected(lang, script, scriptType, scriptContext);
+                                assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 break;
                             case SANDBOX:
                                 if (scriptEngineService.sandboxed()) {
-                                    assertCompileAccepted(lang, script, scriptType, scriptContext);
+                                    assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 } else {
-                                    assertCompileRejected(lang, script, scriptType, scriptContext);
+                                    assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 }
                                 break;
                         }
@@ -366,6 +379,7 @@
     @Test
     public void testCompileNonRegisteredContext() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         String pluginName;
         String unknownContext;
@@ -378,7 +392,7 @@
             for (String type : scriptEngineService.types()) {
                 try {
                     scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin(
-                            pluginName, unknownContext));
+                            pluginName, unknownContext), contextAndHeaders);
                     fail("script compilation should have been rejected");
                 } catch(IllegalArgumentException e) {
                     assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported"));
@@ -389,15 +403,17 @@
     @Test
     public void testCompileCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
-        scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
     @Test
     public void testExecutableCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
@@ -410,46 +426,52 @@
     @Test
     public void testMultipleCompilationsCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         int numberOfCompilations = randomIntBetween(1, 1024);
         for (int i = 0; i < numberOfCompilations; i++) {
-            scriptService.compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+            scriptService
+                    .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         }
         assertEquals(numberOfCompilations, scriptService.stats().getCompilations());
     }
     @Test
     public void testCompilationStatsOnCacheHit() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         Settings.Builder builder = Settings.builder();
         builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1);
         buildScriptService(builder.build());
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
     @Test
     public void testFileScriptCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         createFileScripts("test");
-        scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts));
+        scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
        assertEquals(1L, scriptService.stats().getCompilations());
     }
     @Test
     public void testIndexedScriptCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
-        scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts));
+        scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
     @Test
     public void testCacheEvictionCountedInCacheEvictionsStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         Settings.Builder builder = Settings.builder();
         builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1);
         buildScriptService(builder.build());
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
-        scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
+        scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(2L, scriptService.stats().getCompilations());
         assertEquals(1L, scriptService.stats().getCacheEvictions());
     }
@@ -462,17 +484,19 @@
         resourceWatcherService.notifyNow();
     }
-    private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
+    private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext,
+            HasContextAndHeaders contextAndHeaders) {
         try {
-            scriptService.compile(new Script(script, scriptType, lang, null), scriptContext);
+            scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders);
             fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]");
         } catch(ScriptException e) {
             //all good
         }
     }
-    private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
-        assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext), notNullValue());
+    private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext,
+            HasContextAndHeaders contextAndHeaders) {
+        assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders), notNullValue());
     }
     public static class TestEngineService implements ScriptEngineService {
 import static org.hamcrest.core.IsNull.notNullValue;
 
 /**
@@ -1260,6 +1253,7 @@ public class DateHistogramIT extends ESIntegTestCase {
                 assertThat(bucket.getDocCount(), equalTo(0l));
             }
         }
+        internalCluster().wipeIndices("test12278");
     }
 
     @Test
@@ -1343,6 +1337,7 @@ public class DateHistogramIT extends ESIntegTestCase {
         Histogram histo = response.getAggregations().get("histo");
         assertThat(histo.getBuckets().size(), equalTo(1));
         assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00"));
+        internalCluster().wipeIndices("test9491");
     }
 
     public void testIssue8209() throws InterruptedException, ExecutionException {
@@ -1367,6 +1362,7 @@ public class DateHistogramIT extends ESIntegTestCase {
         assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L));
         assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00"));
         assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L));
+        internalCluster().wipeIndices("test8209");
     }
 
     /**
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
index 0461edef6b4..26cb3a968b0 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
@@ -56,6 +56,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi
 import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.junit.Test;
 
@@ -235,7 +236,8 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
     public static class SimpleHeuristicParser implements SignificanceHeuristicParser {
 
         @Override
-        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException {
+        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context)
+                throws IOException, QueryShardException {
             parser.nextToken();
             return new SimpleHeuristic();
         }
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
index ca98047a590..6635f1e5b52 100644
--- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
+++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
@@ -171,7 +171,7 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
                 TermsEnum terms = termVector.getFields().terms(field).iterator();
                 BytesRef term;
                 while ((term = terms.next()) != null) {
-                    tv.put(term.utf8ToString(), terms.postings(null, null, PostingsEnum.ALL).freq());
+                    tv.put(term.utf8ToString(), terms.postings(null, PostingsEnum.ALL).freq());
                 }
                 hitField.values().add(tv);
             } catch (IOException e) {
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java
index 577b3eaea31..24d06701b40 100644
--- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java
@@ -104,6 +104,15 @@ public class FunctionScoreFieldValueIT extends ESIntegTestCase {
                     .get();
             assertOrderedSearchHits(response, "1", "2", "3");
 
+            // field is not mapped but we're defaulting it to 100 so all documents should have the same score
+            response = client().prepareSearch("test")
+                    .setExplain(randomBoolean())
+                    .setQuery(functionScoreQuery(matchAllQuery(),
+                            fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100)))
+                    .get();
+            assertEquals(response.getHits().getAt(0).score(), response.getHits().getAt(2).score(), 0);
+
+            // n divided by 0 is infinity, which should provoke an exception.
             try {
                 response = client().prepareSearch("test")
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
index 2509c32f92b..61c4dc78506 100644
--- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
@@ -1400,6 +1400,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
     }
 
     @Test
+    @AwaitsFix(bugUrl="Broken now that BoostingQuery does not extend BooleanQuery anymore")
     public void testBoostingQueryTermVector() throws IOException {
         assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
         ensureGreen();
@@ -1540,7 +1541,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                         .fragmentSize(-1).numOfFragments(2).fragmenter("simple")).get();
 
         assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight"));
-        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));
+        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));
 
         response = client().prepareSearch("test")
                 .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
@@ -1548,7 +1549,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                         .fragmentSize(-1).numOfFragments(2).fragmenter("span")).get();
 
         assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight"));
-        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));
+        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));
 
         assertFailures(client().prepareSearch("test")
                 .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
@@ -2048,7 +2049,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
 
         searchResponse = client().search(searchRequest("test").source(source)).actionGet();
 
-        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog"));
+        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog"));
     }
 
     @Test
@@ -2555,6 +2556,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
     }
 
     @Test
+    @AwaitsFix(bugUrl="Broken now that BoostingQuery does not extend BooleanQuery anymore")
     public void testFastVectorHighlighterPhraseBoost() throws Exception {
        assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
         phraseBoostTestCase("fvh");
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
index 53e17966968..fde5037b850 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
@@ -54,7 +54,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase {
         TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {
             @Override
             public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
-                return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+                return suggester.toFiniteStrings(stream);
             }
         }));
         assertTokenStreamContents(suggestTokenStream, new String[] {"mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10"}, new int[] { 1 }, null, null);
@@ -73,7 +73,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase {
         TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(filter, payload, new CompletionTokenStream.ToFiniteStrings() {
             @Override
             public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
-                return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+                return suggester.toFiniteStrings(stream);
             }
         }));
         assertTokenStreamContents(suggestTokenStream, new String[] {"mysynonym", "mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10", "Surface keyword|friggin payload|10"}, new int[] { 2, 0 }, null, null);
@@ -97,7 +97,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase {
         TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
             @Override
             public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
-                Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+                Set<IntsRef> finiteStrings = suggester.toFiniteStrings(stream);
                 return finiteStrings;
             }
         });
@@ -137,7 +137,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase {
         TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
             @Override
             public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
-                Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+                Set<IntsRef> finiteStrings = suggester.toFiniteStrings(stream);
                 return finiteStrings;
             }
         });
@@ -156,17 +156,15 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase {
         TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenizer, payload, new CompletionTokenStream.ToFiniteStrings() {
             @Override
             public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
-                return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+                return suggester.toFiniteStrings(stream);
             }
         }));
         TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class);
-        BytesRef ref = termAtt.getBytesRef();
-        assertNotNull(ref);
+        assertNotNull(termAtt.getBytesRef());
        suggestTokenStream.reset();
 
         while (suggestTokenStream.incrementToken()) {
-            termAtt.fillBytesRef();
-            assertThat(ref.utf8ToString(), equalTo("mykeyword"));
+            assertThat(termAtt.getBytesRef().utf8ToString(), equalTo("mykeyword"));
         }
         suggestTokenStream.end();
         suggestTokenStream.close();
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
index e3dfe3b96d3..245a561c339 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.CharsRefBuilder;
+import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.MapperService;
@@ -59,7 +60,8 @@ public class CustomSuggester extends Suggester
             Map<String, Object> options = parser.map();
             CustomSuggestionsContext suggestionContext = new CustomSuggestionsContext(CustomSuggester.this, options);
             suggestionContext.setField((String) options.get("field"));
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
index d00658709bb..aae7800c086 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.search.suggest;
 
 import com.google.common.base.Charsets;
-import com.google.common.collect.ImmutableList;
 import com.google.common.io.Resources;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
@@ -967,7 +966,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
         assertAcked(builder.addMapping("type1", mapping));
         ensureGreen();
 
-        ImmutableList.Builder<String> titles = ImmutableList.builder();
+        List<String> titles = new ArrayList<>();
 
         // We're going to be searching for:
         //   united states house of representatives elections in washington 2006
@@ -1058,7 +1057,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
         }
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        for (String title: titles.build()) {
+        for (String title: titles) {
             builders.add(client().prepareIndex("test", "type1").setSource("title", title));
         }
         indexRandom(true, builders);
@@ -1113,7 +1112,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
         assertAcked(builder.addMapping("type1", mapping));
         ensureGreen();
 
-        ImmutableList.Builder<String> titles = ImmutableList.builder();
+        List<String> titles = new ArrayList<>();
 
         titles.add("United States House of Representatives Elections in Washington 2006");
         titles.add("United States House of Representatives Elections in Washington 2005");
@@ -1123,7 +1122,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
         titles.add("Election");
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        for (String title: titles.build()) {
+        for (String title: titles) {
             builders.add(client().prepareIndex("test", "type1").setSource("title", title));
         }
         indexRandom(true, builders);
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
index 23f92bd7ed3..eb78b6599d6 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
@@ -154,7 +154,7 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide
                     if (term == null) {
                         break;
                     }
-                    docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS);
+                    docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS);
                     builder.startTerm(term);
                     int docFreq = 0;
                     while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@@ -330,6 +330,6 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide
 
         @Override
         public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
-            return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream);
+            return prototype.toFiniteStrings(stream);
         }
     }
\ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
index 0bbd1cef8bf..35a222a75e1 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
@@ -23,7 +23,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.codecs.lucene53.Lucene53Codec;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.Fields;
@@ -44,7 +44,6 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
 import org.elasticsearch.Version;
@@ -282,7 +281,7 @@ public class CompletionPostingsFormatTest extends ESTestCase {
 
     public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces, long[] weights) throws IOException {
         RAMDirectory dir = new RAMDirectory();
-        Codec codec = new Lucene50Codec() {
+        Codec codec = new Lucene53Codec() {
             public PostingsFormat getPostingsFormatForField(String field) {
                 final PostingsFormat in = super.getPostingsFormatForField(field);
                 return mapper.fieldType().postingsFormat(in);
@@ -401,13 +400,13 @@ public class CompletionPostingsFormatTest extends ESTestCase {
         }
 
         @Override
-        public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+        public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
             final TermPosAndPayload data = current;
             return new PostingsEnum() {
                 boolean done = false;
                 @Override
                 public int nextPosition() throws IOException {
-                    return current.pos;
+                    return data.pos;
                 }
 
                 @Override
@@ -422,7 +421,7 @@ public class CompletionPostingsFormatTest extends ESTestCase {
 
                 @Override
                 public BytesRef getPayload() throws IOException {
-                    return current.payload;
+                    return data.payload;
                 }
 
                 @Override
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 3454cd2561b..96c563cb604 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -21,7 +21,6 @@ package org.elasticsearch.snapshots;
 
 import com.carrotsearch.hppc.IntHashSet;
 import com.carrotsearch.hppc.IntSet;
-import com.google.common.collect.ImmutableList;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ListenableActionFuture;
@@ -482,14 +481,14 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             @Override
             public void run() {
                 SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get();
-                ImmutableList<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
+                List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
                 assertEquals(snapshotStatuses.size(), 1);
                 logger.trace("current snapshot status [{}]", snapshotStatuses.get(0));
                 assertTrue(snapshotStatuses.get(0).getState().completed());
             }
         }, 1, TimeUnit.MINUTES);
 
         SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get();
-        ImmutableList<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
+        List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
         assertThat(snapshotStatuses.size(), equalTo(1));
         SnapshotStatus snapshotStatus = snapshotStatuses.get(0);
         logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index c3d3f0ff896..b327c064e2e 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.snapshots;
 
 import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ExceptionsHelper;
@@ -74,6 +73,7 @@ import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -1832,9 +1832,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
                 shards.put(new ShardId("test-idx", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED));
                 shards.put(new ShardId("test-idx", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED));
                 shards.put(new ShardId("test-idx", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED));
-                ImmutableList.Builder<Entry> entries = ImmutableList.builder();
-                entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, ImmutableList.of("test-idx"), System.currentTimeMillis(), shards.build()));
-                return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(entries.build())).build();
+                List<Entry> entries = new ArrayList<>();
+                entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, Collections.singletonList("test-idx"), System.currentTimeMillis(), shards.build()));
+                return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))).build();
             }
 
             @Override
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
index 76f057a60ff..8e9d7cb8428 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
@@ -18,14 +18,13 @@
  */
 package org.elasticsearch.snapshots;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Test;
 
+import java.util.Arrays;
 import java.util.List;
 
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 
 /**
@@ -52,7 +51,7 @@ public class SnapshotUtilsTests extends ESTestCase {
     }
 
     private void assertIndexNameFiltering(String[] indices, String[] filter, IndicesOptions indicesOptions, String[] expected) {
-        List<String> indicesList = ImmutableList.copyOf(indices);
+        List<String> indicesList = Arrays.asList(indices);
         List<String> actual = SnapshotUtils.filterIndices(indicesList, filter, indicesOptions);
         assertThat(actual, containsInAnyOrder(expected));
     }
diff --git a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java
index 5327f4e6325..35dd8cabecb 100644
--- a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -218,7 +218,7 @@ import static org.hamcrest.Matchers.*;
  * This class supports the following system properties (passed with -Dkey=value to the application)
  *
@@ -268,6 +268,15 @@ public abstract class ESIntegTestCase extends ESTestCase {
      */
     public static final String SETTING_INDEX_SEED = "index.tests.seed";
 
+    /**
+     * A boolean value to enable or disable mock modules. This is useful to test the
+     * system without asserting modules, to make sure they don't hide any bugs in
+     * production.
+     *
+     * @see ESIntegTestCase
+     */
+    public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
+
     /**
      * Threshold at which indexing switches from frequently async to frequently bulk.
      */
@@ -1739,7 +1748,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
                 throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
             }
         }
-        return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportAddresses);
+        return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
     }
 
     protected Settings externalClusterClientSettings() {
@@ -1806,9 +1815,14 @@ public abstract class ESIntegTestCase extends ESTestCase {
             nodeMode = "local";
         }
 
+        boolean enableMockModules = enableMockModules();
         return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes,
                 InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
-                InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix);
+                InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, enableMockModules);
+    }
+
+    protected boolean enableMockModules() {
+        return RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
     }
 
     /**
diff --git a/core/src/test/java/org/elasticsearch/test/ESTestCase.java b/core/src/test/java/org/elasticsearch/test/ESTestCase.java
index 7e1be64908f..3057982df1a 100644
--- a/core/src/test/java/org/elasticsearch/test/ESTestCase.java
+++ b/core/src/test/java/org/elasticsearch/test/ESTestCase.java
@@ -21,7 +21,6 @@ package org.elasticsearch.test;
 import com.carrotsearch.randomizedtesting.RandomizedContext;
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.annotations.Listeners;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
@@ -92,10 +91,6 @@ import static org.hamcrest.Matchers.equalTo;
         LoggingListener.class,
         AssertionErrorThreadDumpPrinter.class
 })
-// remove this entire annotation on upgrade to 5.3!
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    IBMJ9HackThreadFilters.class,
-})
 @ThreadLeakScope(Scope.SUITE)
 @ThreadLeakLingering(linger = 5000) // 5 sec lingering
 @TimeoutSuite(millis = 20 * TimeUnits.MINUTE)
diff --git a/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java
index 685b158862f..29a1a3362d9 100644
--- a/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java
+++ b/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.test;
 
 import com.carrotsearch.randomizedtesting.annotations.Listeners;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -35,10 +34,6 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
 @Listeners({
         ReproduceInfoPrinter.class
 })
-//remove this entire annotation on upgrade to 5.3!
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    IBMJ9HackThreadFilters.class,
-})
 @TimeoutSuite(millis = TimeUnits.HOUR)
 @LuceneTestCase.SuppressReproduceLine
 @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
diff --git a/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java
index 9a919a391cb..90ca7818b9f 100644
--- a/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java
+++ b/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java
@@ -33,10 +33,12 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.Plugin;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.file.Path;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -65,7 +67,7 @@ public final class ExternalTestCluster extends TestCluster {
     private final int numDataNodes;
     private final int numMasterAndDataNodes;
 
-    public ExternalTestCluster(Path tempDir, Settings additionalSettings, TransportAddress... transportAddresses) {
+    public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection<Class<? extends Plugin>> pluginClasses, TransportAddress... transportAddresses) {
         super(0);
         Settings clientSettings = Settings.settingsBuilder()
                 .put(additionalSettings)
@@ -75,7 +77,11 @@ public final class ExternalTestCluster extends TestCluster {
                 .put("path.home", tempDir)
                 .put("node.mode", "network").build(); // we require network here!
-        this.client = TransportClient.builder().settings(clientSettings).build().addTransportAddresses(transportAddresses);
+        TransportClient.Builder transportClientBuilder = TransportClient.builder().settings(clientSettings);
+        for (Class<? extends Plugin> pluginClass : pluginClasses) {
+            transportClientBuilder.addPlugin(pluginClass);
+        }
+        this.client = transportClientBuilder.build().addTransportAddresses(transportAddresses);
 
         NodesInfoResponse nodeInfos = this.client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get();
         httpAddresses = new InetSocketAddress[nodeInfos.getNodes().length];
diff --git a/core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java b/core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java
deleted file mode 100644
index 45c8277dc02..00000000000
--- a/core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java
+++ /dev/null
@@ -1,53 +0,0 @@
-package org.elasticsearch.test;
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import com.carrotsearch.randomizedtesting.ThreadFilter;
-
-import org.apache.lucene.util.Constants;
-import org.apache.lucene.util.Version;
-
-/** temporary workaround for https://issues.apache.org/jira/browse/LUCENE-6518
- *  remove me on upgrade to 5.3! I am just an updated version of QuickPatchThreadFilters from lucene */
-public class IBMJ9HackThreadFilters implements ThreadFilter {
-    static final boolean isJ9;
-
-    static {
-        assert Version.LATEST.equals(Version.LUCENE_5_2_1) : "please remove this entire class for 5.3";
-        isJ9 = Constants.JAVA_VENDOR.startsWith("IBM");
-    }
-
-    @Override
-    public boolean reject(Thread t) {
-        if (isJ9) {
-            // LUCENE-6518
-            if ("ClassCache Reaper".equals(t.getName())) {
-                return true;
-            }
-
-            // LUCENE-4736
-            StackTraceElement [] stack = t.getStackTrace();
-            if (stack.length > 0 && stack[stack.length - 1].getClassName().equals("java.util.Timer$TimerImpl")) {
-                return true;
-            }
-        }
-        return false;
-    }
-}
diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
index c6e81f5f80d..07aef2125d4 100644
--- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
@@ -153,15 +153,6 @@ public final class InternalTestCluster extends TestCluster {
 
     static NodeConfigurationSource DEFAULT_SETTINGS_SOURCE = NodeConfigurationSource.EMPTY;
 
-    /**
-     * A boolean value to enable or disable mock modules. This is useful to test the
-     * system without asserting modules that to make sure they don't hide any bugs in
-     * production.
-     *
-     * @see ESIntegTestCase
-     */
-    public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
-
     /**
      * A node level setting that holds a per node random seed that is consistent across node restarts
      */
@@ -192,8 +183,6 @@ public final class InternalTestCluster extends TestCluster {
 
     public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET;
 
-    private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
-
     static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
     static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
 
@@ -229,6 +218,8 @@ public final class InternalTestCluster extends TestCluster {
 
     private final ExecutorService executor;
 
+    private final boolean enableMockModules;
+
     /**
     * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
     */
@@ -240,7 +231,7 @@ public final class InternalTestCluster extends TestCluster {
 
     public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir,
                                int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
-                               boolean enableHttpPipelining, String nodePrefix) {
+                               boolean enableHttpPipelining, String nodePrefix, boolean enableMockModules) {
         super(clusterSeed);
         if ("network".equals(nodeMode) == false && "local".equals(nodeMode) == false) {
             throw new IllegalArgumentException("Unknown nodeMode: " + nodeMode);
@@ -276,6 +267,7 @@ public final class InternalTestCluster extends TestCluster {
 
         this.nodePrefix = nodePrefix;
         assert nodePrefix != null;
+        this.enableMockModules = enableMockModules;
 
         /*
          * TODO
@@ -387,15 +379,15 @@ public final class InternalTestCluster extends TestCluster {
     private Collection<Class<? extends Plugin>> getPlugins(long seed) {
         Set<Class<? extends Plugin>> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins());
         Random random = new Random(seed);
-        if (ENABLE_MOCK_MODULES && usually(random)) {
+        if (enableMockModules && usually(random)) {
             plugins.add(MockTransportService.TestPlugin.class);
             plugins.add(MockFSIndexStore.TestPlugin.class);
             plugins.add(NodeMocksPlugin.class);
             plugins.add(MockEngineFactoryPlugin.class);
             plugins.add(MockSearchService.TestPlugin.class);
-        }
-        if (isLocalTransportConfigured()) {
-            plugins.add(AssertingLocalTransport.TestPlugin.class);
+            if (isLocalTransportConfigured()) {
+                plugins.add(AssertingLocalTransport.TestPlugin.class);
+            }
         }
         return plugins;
     }
@@ -440,9 +432,6 @@ public final class InternalTestCluster extends TestCluster {
         if (random.nextBoolean()) {
             builder.put("indices.fielddata.cache.size", 1 + random.nextInt(1000), ByteSizeUnit.MB);
         }
-        if (random.nextBoolean()) {
-            builder.put("indices.fielddata.cache.expire", TimeValue.timeValueMillis(1 + random.nextInt(10000)));
-        }
     }
 
     // randomize netty settings
diff --git a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
index e6a37dcf02a..48725f4c3e4 100644
--- a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
+++ b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
@@ -94,7 +94,7 @@ public class TestSearchContext extends SearchContext {
     private final Map subPhaseContexts = new HashMap<>();
 
     public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService) {
-        super(ParseFieldMatcher.STRICT);
+        super(ParseFieldMatcher.STRICT, null);
         this.pageCacheRecycler = pageCacheRecycler;
        this.bigArrays = bigArrays.withCircuitBreaking();
         this.indexService = indexService;
@@ -105,7 +105,7 @@ public class TestSearchContext extends SearchContext {
     }
 
     public TestSearchContext() {
-        super(ParseFieldMatcher.STRICT);
+        super(ParseFieldMatcher.STRICT, null);
         this.pageCacheRecycler = null;
         this.bigArrays = null;
         this.indexService = null;
diff --git a/core/src/test/java/org/elasticsearch/test/VersionUtils.java b/core/src/test/java/org/elasticsearch/test/VersionUtils.java
index ebdad0071df..30a89e4fb5f 100644
--- a/core/src/test/java/org/elasticsearch/test/VersionUtils.java
+++ b/core/src/test/java/org/elasticsearch/test/VersionUtils.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.test;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.Version;
 
 import java.lang.reflect.Field;
@@ -53,11 +52,11 @@ public class VersionUtils {
         }
         List<Integer> idList = new ArrayList<>(ids);
         Collections.sort(idList);
-        ImmutableList.Builder<Version> version = ImmutableList.builder();
+        List<Version> version = new ArrayList<>();
         for (Integer integer : idList) {
             version.add(Version.fromId(integer));
         }
-        SORTED_VERSIONS = version.build();
+        SORTED_VERSIONS = Collections.unmodifiableList(version);
     }
 
     /** Returns immutable list of all known versions. */
diff --git a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
index 6a55fbd2577..162dff76bf8 100644
--- a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
+++ b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
@@ -55,6 +56,7 @@ public class TestClusterService implements ClusterService {
     private final Queue onGoingTimeouts = ConcurrentCollections.newQueue();
     private final ThreadPool threadPool;
     private final ESLogger logger = Loggers.getLogger(getClass(), Settings.EMPTY);
+    private final OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider());
 
     public TestClusterService() {
         this(ClusterState.builder(new ClusterName("test")).build());
@@ -129,7 +131,7 @@ public class TestClusterService implements ClusterService {
 
     @Override
     public OperationRouting operationRouting() {
-        return null;
+        return operationRouting;
     }
 
     @Override
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java
index 174e83e15a4..88bcb9024a1 100644
--- a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java
+++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.test.disruption;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
@@ -27,6 +26,8 @@ import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
@@ -68,15 +69,15 @@ public abstract class NetworkPartition implements ServiceDisruptionScheme {
     }
 
-    public List<String> getNodesSideOne() {
-        return ImmutableList.copyOf(nodesSideOne);
+    public Collection<String> getNodesSideOne() {
+        return Collections.unmodifiableCollection(nodesSideOne);
     }
 
-    public List<String> getNodesSideTwo() {
-        return ImmutableList.copyOf(nodesSideTwo);
+    public Collection<String> getNodesSideTwo() {
+        return Collections.unmodifiableCollection(nodesSideTwo);
     }
 
-    public List<String> getMajoritySide() {
+    public Collection<String> getMajoritySide() {
         if (nodesSideOne.size() >= nodesSideTwo.size()) {
             return getNodesSideOne();
         } else {
@@ -84,7 +85,7 @@ public abstract class NetworkPartition implements ServiceDisruptionScheme {
         }
     }
 
-    public List<String> getMinoritySide() {
+    public Collection<String> getMinoritySide() {
         if (nodesSideOne.size() >= nodesSideTwo.size()) {
             return getNodesSideTwo();
         } else {
diff --git a/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
index 365bf7fb652..422b9375a1e 100644
--- a/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
+++ b/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
@@ -146,13 +146,13 @@ public class ThrowingLeafReaderWrapper extends FilterLeafReader {
         }
 
         @Override
-        public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+        public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
             if ((flags & PostingsEnum.POSITIONS) != 0) {
                 thrower.maybeThrow(Flags.DocsAndPositionsEnum);
             } else {
                 thrower.maybeThrow(Flags.DocsEnum);
             }
-            return super.postings(liveDocs, reuse, flags);
+            return super.postings(reuse, flags);
         }
     }
 
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
index 8678f20ac12..497322ac7ea 100644
--- a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -67,6 +67,7 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.hamcrest.CoreMatchers;
 import org.hamcrest.Matcher;
 import org.hamcrest.Matchers;
 import org.junit.Assert;
@@ -126,6 +127,22 @@ public class ElasticsearchAssertions {
         assertBlocked(builder, null);
     }
 
+    /**
+     * Checks that all shard requests of a replicated broadcast request failed due to a cluster block
+     *
+     * @param replicatedBroadcastResponse the response that should only contain failed shard responses
+     *
+     */
+    public static void assertBlocked(BroadcastResponse replicatedBroadcastResponse) {
+        assertThat("all shard requests should have failed", replicatedBroadcastResponse.getFailedShards(), Matchers.equalTo(replicatedBroadcastResponse.getTotalShards()));
+        for (ShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) {
+            ClusterBlockException clusterBlockException = (ClusterBlockException) ExceptionsHelper.unwrap(exception.getCause(), ClusterBlockException.class);
+            assertNotNull("expected the cause of failure to be a ClusterBlockException but got " + exception.getCause().getMessage(), clusterBlockException);
+            assertThat(clusterBlockException.blocks().size(), greaterThan(0));
+            assertThat(clusterBlockException.status(), CoreMatchers.equalTo(RestStatus.FORBIDDEN));
+        }
+    }
+
     /**
      * Executes the request and fails if the request has not been blocked by a specific {@link ClusterBlock}.
      *
diff --git a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
index 07f09cc2386..0a52a376610 100644
--- a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
+++ b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -25,8 +25,8 @@ import com.carrotsearch.randomizedtesting.TraceFormatting;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.InternalTestCluster;
 import org.junit.internal.AssumptionViolatedException;
 import org.junit.runner.Description;
 import org.junit.runner.notification.Failure;
@@ -158,7 +158,7 @@ public class ReproduceInfoPrinter extends RunListener {
             appendProperties("es.logger.level");
             if (inVerifyPhase()) {
                 // these properties only make sense for integration tests
-                appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES);
+                appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, ESIntegTestCase.TESTS_ENABLE_MOCK_MODULES);
             }
             appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms",
                     "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version");
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
index a40d2268d43..c5bb281a6e7 100644
--- a/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
@@ -19,11 +19,11 @@
 package org.elasticsearch.test.rest.section;
 
 import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -58,7 +58,7 @@ public class ApiCallSection {
     }
 
     public List<Map<String, Object>> getBodies() {
-        return ImmutableList.copyOf(bodies);
+        return Collections.unmodifiableList(bodies);
     }
 
     public void addBody(Map<String, Object> body) {
diff --git a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index 2770e3581d2..dcdec27ae33 100644
--- a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
+++ b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -53,8 +53,8 @@ public class InternalTestClusterTests extends ESTestCase {
         String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
 
         Path baseDir = createTempDir();
-        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix);
-        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix);
+        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
+        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
 
         // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way
         assertClusters(cluster0, cluster1, false);
@@ -111,8 +111,8 @@ public class InternalTestClusterTests extends ESTestCase {
         String nodePrefix = "foobar";
 
         Path baseDir = createTempDir();
-        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix);
-        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix);
+        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
+        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
         assertClusters(cluster0, cluster1, false);
         long seed = randomLong();
diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java
new file mode 100644
index 00000000000..0fc4f4c7a7f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ThreadPoolStatsTests extends ESTestCase {
+
+    @Test
+    public void testThreadPoolStatsSort() throws IOException {
+        List<ThreadPoolStats.Stats> stats = new ArrayList<>();
+        stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L));
+        stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L));
+        stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L));
+        stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L));
+        stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L));
+        stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L));
+        stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L));
+
+        List<ThreadPoolStats.Stats> copy = new ArrayList<>(stats);
+        Collections.sort(copy);
+
+        List<String> names = new ArrayList<>(copy.size());
+        for (ThreadPoolStats.Stats stat : copy) {
+            names.add(stat.getName());
+        }
+        assertThat(names, contains("a", "d", "m", "m", "m", "t", "z"));
+
+        List<Integer> threads = new ArrayList<>(copy.size());
+        for (ThreadPoolStats.Stats stat : copy) {
+            threads.add(stat.getThreads());
+        }
+        assertThat(threads, contains(-1, -1, 1, 2, 3, -1, -1));
+    }
+
+    @Test
+    public void testThreadPoolStatsToXContent() throws IOException {
+        try (BytesStreamOutput os = new BytesStreamOutput()) {
+
+            List<ThreadPoolStats.Stats> stats = new ArrayList<>();
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SUGGEST, -1, 0, 0, 0, 0, 0L));
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L));
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L));
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L));
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.OPTIMIZE, -1, 0, 0, 0, 0, 0L));
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.PERCOLATE, -1, 0, 0, 0, 0, 0L));
+            stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L));
+
+
+            try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os)) {
+                new ThreadPoolStats(stats).toXContent(builder, ToXContent.EMPTY_PARAMS);
+            }
+
+            try (XContentParser parser = XContentType.JSON.xContent().createParser(os.bytes())) {
+                XContentParser.Token token = parser.currentToken();
+                assertNull(token);
+
+                token = parser.nextToken();
+                assertThat(token, equalTo(XContentParser.Token.VALUE_STRING));
+
+                token = parser.nextToken();
+                assertThat(token, equalTo(XContentParser.Token.START_OBJECT));
+
+                token = parser.nextToken();
+                assertThat(token, equalTo(XContentParser.Token.FIELD_NAME));
+
+                List<String> names = new ArrayList<>();
+                while (token == XContentParser.Token.FIELD_NAME) {
+                    names.add(parser.currentName());
+
+                    token = parser.nextToken();
+                    assertThat(token, equalTo(XContentParser.Token.START_OBJECT));
+
+                    parser.skipChildren();
+                    token = parser.nextToken();
+                }
+                assertThat(names, contains(ThreadPool.Names.GENERIC,
+                        ThreadPool.Names.OPTIMIZE,
+                        ThreadPool.Names.PERCOLATE,
+                        ThreadPool.Names.SAME,
+                        ThreadPool.Names.SEARCH,
+                        ThreadPool.Names.SUGGEST,
+                        ThreadPool.Names.WARMER));
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java
index 3abca9be066..03ac788511a 100644
--- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java
+++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java
@@ -27,13 +27,16 @@ import org.elasticsearch.action.ActionModule;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest;
 import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
 import org.elasticsearch.action.percolate.PercolateResponse;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
@@ -44,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.GeoShapeQueryBuilder;
@@ -52,9 +56,16 @@ import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.index.query.TermsQueryBuilder;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestController;
+import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.Template;
 import org.elasticsearch.script.groovy.GroovyScriptEngineService;
 import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
@@ -64,6 +75,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -77,11 +89,14 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.node.Node.HTTP_ENABLED;
 import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.search.suggest.SuggestBuilders.phraseSuggestion;
 import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionSize;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
@@ -273,6 +288,59 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
         assertRequestsContainHeader(PutIndexedScriptRequest.class);
     }
 
+    @Test
+    public void testThatIndexedScriptGetRequestInTemplateQueryContainsContextAndHeaders() throws Exception {
+        PutIndexedScriptResponse scriptResponse = transportClient()
+                .preparePutIndexedScript(
+                        MustacheScriptEngineService.NAME,
+                        "my_script",
+                        jsonBuilder().startObject().field("script", "{ \"query\": { \"match\": { \"name\": \"Star Wars\" }}}").endObject()
+                                .string()).get();
+        assertThat(scriptResponse.isCreated(), is(true));
+
+        transportClient().prepareIndex(queryIndex, "type", "1")
+                .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject()).get();
+        transportClient().admin().indices().prepareRefresh(queryIndex).get();
+
+        SearchResponse searchResponse = transportClient()
+                .prepareSearch(queryIndex)
+                .setQuery(
+                        QueryBuilders.templateQuery(new Template("my_script", ScriptType.INDEXED,
+                                MustacheScriptEngineService.NAME, null, null))).get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 1);
+
+        assertGetRequestsContainHeaders(".scripts");
+        assertRequestsContainHeader(PutIndexedScriptRequest.class);
+    }
+
+    @Test
+    public void testThatIndexedScriptGetRequestInReducePhaseContainsContextAndHeaders() throws Exception {
+        PutIndexedScriptResponse scriptResponse = transportClient().preparePutIndexedScript(GroovyScriptEngineService.NAME, "my_script",
+                jsonBuilder().startObject().field("script", "_value0 * 10").endObject().string()).get();
+        assertThat(scriptResponse.isCreated(), is(true));
+
+        transportClient().prepareIndex(queryIndex, "type", "1")
+                .setSource(jsonBuilder().startObject().field("s_field", "foo").field("l_field", 10).endObject()).get();
+        transportClient().admin().indices().prepareRefresh(queryIndex).get();
+
+        SearchResponse searchResponse = transportClient()
+                .prepareSearch(queryIndex)
+                .addAggregation(
+                        AggregationBuilders
+                                .terms("terms")
+                                .field("s_field")
+                                .subAggregation(AggregationBuilders.max("max").field("l_field"))
+                                .subAggregation(
+                                        PipelineAggregatorBuilders.bucketScript("scripted").setBucketsPaths("max").script(
+                                                new Script("my_script", ScriptType.INDEXED, GroovyScriptEngineService.NAME, null)))).get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 1);
+
+        assertGetRequestsContainHeaders(".scripts");
+        assertRequestsContainHeader(PutIndexedScriptRequest.class);
+    }
+
     @Test
     public void testThatSearchTemplatesWithIndexedTemplatesGetRequestContainsContextAndHeaders() throws Exception {
         PutIndexedScriptResponse scriptResponse = transportClient().preparePutIndexedScript(MustacheScriptEngineService.NAME, "the_template",
@@ -302,6 +370,98 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
         assertRequestsContainHeader(PutIndexedScriptRequest.class);
     }
 
+    @Test
testThatIndexedScriptGetRequestInPhraseSuggestContainsContextAndHeaders() throws Exception { + CreateIndexRequestBuilder builder = transportClient().admin().indices().prepareCreate("test").setSettings(settingsBuilder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable. + .put("index.analysis.analyzer.text.tokenizer", "standard") + .putArray("index.analysis.analyzer.text.filter", "lowercase", "my_shingle") + .put("index.analysis.filter.my_shingle.type", "shingle") + .put("index.analysis.filter.my_shingle.output_unigrams", true) + .put("index.analysis.filter.my_shingle.min_shingle_size", 2) + .put("index.analysis.filter.my_shingle.max_shingle_size", 3)); + + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type1") + .startObject("properties") + .startObject("title") + .field("type", "string") + .field("analyzer", "text") + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(builder.addMapping("type1", mapping)); + ensureGreen(); + + List<String> titles = new ArrayList<>(); + + titles.add("United States House of Representatives Elections in Washington 2006"); + titles.add("United States House of Representatives Elections in Washington 2005"); + titles.add("State"); + titles.add("Houses of Parliament"); + titles.add("Representative Government"); + titles.add("Election"); + + List<IndexRequestBuilder> builders = new ArrayList<>(); + for (String title: titles) { + transportClient().prepareIndex("test", "type1").setSource("title", title).get(); + } + transportClient().admin().indices().prepareRefresh("test").get(); + + String filterStringAsFilter = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("match_phrase") + .field("title", "{{suggestion}}") + .endObject() + .endObject() + .endObject() + .string(); + + PutIndexedScriptResponse scriptResponse = transportClient() + .preparePutIndexedScript( + MustacheScriptEngineService.NAME, + "my_script", + jsonBuilder().startObject().field("script", filterStringAsFilter).endObject() + .string()).get(); + assertThat(scriptResponse.isCreated(), is(true)); + + PhraseSuggestionBuilder suggest = phraseSuggestion("title") + .field("title") + .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("title") + .suggestMode("always") + .maxTermFreq(.99f) + .size(10) + .maxInspections(200) + ) + .confidence(0f) + .maxErrors(2f) + .shardSize(30000) + .size(10); + + PhraseSuggestionBuilder filteredFilterSuggest = suggest.collateQuery(new Template("my_script", ScriptType.INDEXED, + MustacheScriptEngineService.NAME, null, null)); + + SearchRequestBuilder searchRequestBuilder = transportClient().prepareSearch("test").setSize(0); + String suggestText = "united states house of representatives elections in washington 2006"; + if (suggestText != null) { + searchRequestBuilder.setSuggestText(suggestText); + } + searchRequestBuilder.addSuggestion(filteredFilterSuggest); + SearchResponse actionGet = searchRequestBuilder.execute().actionGet(); + assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(0)); + Suggest searchSuggest = actionGet.getSuggest(); + + assertSuggestionSize(searchSuggest, 0, 2, "title"); + + assertGetRequestsContainHeaders(".scripts"); + assertRequestsContainHeader(PutIndexedScriptRequest.class); + } + + @Test public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { String releventHeaderName = "relevant_" + randomHeaderKey; diff --git 
a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index a8478aef051..1f8b7f165a5 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -76,7 +76,7 @@ public class TribeIT extends ESIntegTestCase { public static void setupSecondCluster() throws Exception { ESIntegTestCase.beforeClass(); cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, - Strings.randomBase64UUID(getRandom()), NodeConfigurationSource.EMPTY, 0, false, SECOND_CLUSTER_NODE_PREFIX); + Strings.randomBase64UUID(getRandom()), NodeConfigurationSource.EMPTY, 0, false, SECOND_CLUSTER_NODE_PREFIX, true); cluster2.beforeTest(getRandom(), 0.1); cluster2.ensureAtLeastNumDataNodes(2); diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 6f70d363ec3..832f5d3af2e 100644 --- a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -233,13 +233,13 @@ public class SimpleValidateQueryIT extends ESIntegTestCase { // common terms queries assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("(field:huge field:brown) +field:pidgin"), true); + containsString("+field:pidgin (field:huge field:brown)"), true); assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); // match queries with cutoff frequency assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("(field:huge field:brown) +field:pidgin"), true); + containsString("+field:pidgin (field:huge field:brown)"), true); assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json diff --git 
a/core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json new file mode 100644 index 00000000000..5f08919481f --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json @@ -0,0 +1,6 @@ +{"index":["test0", "test1"], "request_cache": true} +{"template": {"query" : {"match_{{template}}" {}}}, "params": {"template": "all" } } } +{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"template": {"query" : {"match_{{template}}" {}}}, "params": {"template": "all" } } } +{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"template": {"query" : {"match_{{template}}" {}}}, "params": {"template": "all" } } } diff --git a/dev-tools/src/main/resources/ant/integration-tests.xml b/dev-tools/src/main/resources/ant/integration-tests.xml index 7b97f3e089d..9c8df5d4c8f 100644 --- a/dev-tools/src/main/resources/ant/integration-tests.xml +++ b/dev-tools/src/main/resources/ant/integration-tests.xml @@ -32,8 +32,10 @@ + + @@ -394,36 +396,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev-tools/src/main/resources/forbidden/core-signatures.txt b/dev-tools/src/main/resources/forbidden/core-signatures.txt index d95b793d1ef..f050ec4cf37 100644 --- a/dev-tools/src/main/resources/forbidden/core-signatures.txt +++ b/dev-tools/src/main/resources/forbidden/core-signatures.txt @@ -14,8 +14,7 @@ # either express or implied. See the License for the specific # language governing permissions and limitations under the License. -# For shaded dependencies, please put signatures in third-party-shaded.txt -# and third-party-unshaded.txt instead of here. +# For third-party dependencies, please put signatures in third-party.txt instead of here. @defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by its name) which executor it is associated with @@ -47,7 +46,7 @@ org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java. org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean) org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean) -@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead. +@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead. java.lang.Object#wait() java.lang.Object#wait(long) java.lang.Object#wait(long,int) @@ -87,3 +86,4 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI) @defaultMessage avoid adding additional dependencies on Guava com.google.common.collect.Lists +com.google.common.collect.ImmutableList diff --git a/dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt b/dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt deleted file mode 100644 index db1cd6f83be..00000000000 --- a/dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. 
Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -@defaultMessage Use Long.compare instead we are on Java7 -org.elasticsearch.common.primitives.Longs#compare(long,long) - -@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder. -org.elasticsearch.common.compress.lzf.impl.UnsafeChunkDecoder#<init>() -org.elasticsearch.common.compress.lzf.util.ChunkDecoderFactory#optimalInstance() - -@defaultMessage Constructing a DateTime without a time zone is dangerous -org.elasticsearch.joda.time.DateTime#<init>() -org.elasticsearch.joda.time.DateTime#<init>(long) -org.elasticsearch.joda.time.DateTime#<init>(int, int, int, int, int) -org.elasticsearch.joda.time.DateTime#<init>(int, int, int, int, int, int) -org.elasticsearch.joda.time.DateTime#<init>(int, int, int, int, int, int, int) -org.elasticsearch.joda.time.DateTime#now() -org.elasticsearch.joda.time.DateTimeZone#getDefault() - -org.elasticsearch.common.collect.Iterators#emptyIterator() @ Use Collections.emptyIterator instead diff --git a/dev-tools/src/main/resources/forbidden/third-party-unshaded-signatures.txt b/dev-tools/src/main/resources/forbidden/third-party-signatures.txt similarity index 100% rename from dev-tools/src/main/resources/forbidden/third-party-unshaded-signatures.txt rename to dev-tools/src/main/resources/forbidden/third-party-signatures.txt diff --git a/distribution/deb/pom.xml b/distribution/deb/pom.xml index a86fad9513d..aae6f6f4595 100644 --- a/distribution/deb/pom.xml +++ b/distribution/deb/pom.xml @@ -24,15 +24,6 @@ dpkg-sig - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - @@ -172,7 +163,7 @@ ${project.build.directory}/../target/lib - ${project.build.finalName}-shaded.jar,${project.build.finalName}-sources.jar,${project.build.finalName}-tests.jar,${project.build.finalName}-test-sources.jar,slf4j-api-*.jar + ${project.build.finalName}-sources.jar,${project.build.finalName}-tests.jar,${project.build.finalName}-test-sources.jar,slf4j-api-*.jar directory perm diff --git a/distribution/fully-loaded/pom.xml b/distribution/fully-loaded/pom.xml deleted file mode 100644 index 92772e9c866..00000000000 --- a/distribution/fully-loaded/pom.xml +++ /dev/null @@ -1,73 +0,0 @@ - - - 4.0.0 - - org.elasticsearch.distribution - distributions - 2.1.0-SNAPSHOT - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - Distribution: with all optional dependencies - pom - - - - org.elasticsearch - elasticsearch - - - - org.apache.lucene - lucene-expressions - - - - com.spatial4j - spatial4j - - - - com.vividsolutions - jts - - - - - com.github.spullara.mustache.java - compiler - - - - org.codehaus.groovy - groovy-all - indy - - - - log4j - log4j - - - - log4j - apache-log4j-extras - - - - - - net.java.dev.jna - jna - - - - diff --git a/distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 deleted file mode 100644 index 
48f8e581476..00000000000 --- a/distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -33b7cc17d5a7c939af6fe3f67563f4709926d7f5 diff --git a/distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 new file mode 100644 index 00000000000..4d79ce9d9e2 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 @@ -0,0 +1 @@ +1502beac94cf437baff848ffbbb8f76172befa6b diff --git a/distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 b/distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 deleted file mode 100644 index f01d68718f2..00000000000 --- a/distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -603d1f06b133449272799d698e5118db65e523ba diff --git a/distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 new file mode 100644 index 00000000000..9b802fb5e04 --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 @@ -0,0 +1 @@ +f654901e55fe56bdbe4be202767296929c2f8d9e diff --git a/distribution/licenses/lucene-core-5.2.1.jar.sha1 b/distribution/licenses/lucene-core-5.2.1.jar.sha1 deleted file mode 100644 index cbebe2b858c..00000000000 --- a/distribution/licenses/lucene-core-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a175590aa8b04e079eb1a136fd159f9163482ba4 diff --git a/distribution/licenses/lucene-core-5.3.0.jar.sha1 b/distribution/licenses/lucene-core-5.3.0.jar.sha1 new file mode 100644 index 00000000000..9765d65189b --- /dev/null +++ b/distribution/licenses/lucene-core-5.3.0.jar.sha1 @@ -0,0 +1 @@ +9e12bb7c39e964a544e3a23b9c8ffa9599d38f10 diff --git a/distribution/licenses/lucene-expressions-5.2.1.jar.sha1 b/distribution/licenses/lucene-expressions-5.2.1.jar.sha1 deleted file mode 100644 index 1823826d962..00000000000 --- a/distribution/licenses/lucene-expressions-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b966460caa7a91be5969dc5c0053d8de4e861fd6 diff --git a/distribution/licenses/lucene-expressions-5.3.0.jar.sha1 b/distribution/licenses/lucene-expressions-5.3.0.jar.sha1 new file mode 100644 index 00000000000..232b4f3ff34 --- /dev/null +++ b/distribution/licenses/lucene-expressions-5.3.0.jar.sha1 @@ -0,0 +1 @@ +dc6f5e352f787d71a7896025c0cdd0eb665b2985 diff --git a/distribution/licenses/lucene-grouping-5.2.1.jar.sha1 b/distribution/licenses/lucene-grouping-5.2.1.jar.sha1 deleted file mode 100644 index 23cea6c545f..00000000000 --- a/distribution/licenses/lucene-grouping-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5682a9820d4f8ef99150b80dcc260919e68ebf39 diff --git a/distribution/licenses/lucene-grouping-5.3.0.jar.sha1 b/distribution/licenses/lucene-grouping-5.3.0.jar.sha1 new file mode 100644 index 00000000000..82b09e61a01 --- /dev/null +++ b/distribution/licenses/lucene-grouping-5.3.0.jar.sha1 @@ -0,0 +1 @@ +2d27582889b8676dfed6880a920148f3e32c9b42 diff --git a/distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 b/distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 deleted file mode 100644 index 67e9e8ee40a..00000000000 --- a/distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd9bba952e362970a1084201fe4858e08f1ceb1f diff --git a/distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 b/distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 new file mode 100644 index 00000000000..406bc446a08 --- /dev/null +++ b/distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 @@ -0,0 +1 @@ 
+3b9d67c0f93e107a9ad8c179505df56a85e3f027 diff --git a/distribution/licenses/lucene-join-5.2.1.jar.sha1 b/distribution/licenses/lucene-join-5.2.1.jar.sha1 deleted file mode 100644 index 00c2c22e08e..00000000000 --- a/distribution/licenses/lucene-join-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -168e9c9b826faf60489a25645e4322fb8d130574 diff --git a/distribution/licenses/lucene-join-5.3.0.jar.sha1 b/distribution/licenses/lucene-join-5.3.0.jar.sha1 new file mode 100644 index 00000000000..fbf636c2649 --- /dev/null +++ b/distribution/licenses/lucene-join-5.3.0.jar.sha1 @@ -0,0 +1 @@ +95ddffcd889af106136704ecb7dc7173b3e9cdb3 diff --git a/distribution/licenses/lucene-memory-5.2.1.jar.sha1 b/distribution/licenses/lucene-memory-5.2.1.jar.sha1 deleted file mode 100644 index 93c743ba1ad..00000000000 --- a/distribution/licenses/lucene-memory-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -601f5404c137600488f5b2f2ca635db4ac9fd0cb diff --git a/distribution/licenses/lucene-memory-5.3.0.jar.sha1 b/distribution/licenses/lucene-memory-5.3.0.jar.sha1 new file mode 100644 index 00000000000..0f39068c29b --- /dev/null +++ b/distribution/licenses/lucene-memory-5.3.0.jar.sha1 @@ -0,0 +1 @@ +44f50f425264b4b17e6781ba07bdc80b4d36bb65 diff --git a/distribution/licenses/lucene-misc-5.2.1.jar.sha1 b/distribution/licenses/lucene-misc-5.2.1.jar.sha1 deleted file mode 100644 index 227b55c2d23..00000000000 --- a/distribution/licenses/lucene-misc-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be0a4f0ac06f0a2fa3689b4bf6cd1fe6847f9969 diff --git a/distribution/licenses/lucene-misc-5.3.0.jar.sha1 b/distribution/licenses/lucene-misc-5.3.0.jar.sha1 new file mode 100644 index 00000000000..50949e57486 --- /dev/null +++ b/distribution/licenses/lucene-misc-5.3.0.jar.sha1 @@ -0,0 +1 @@ +d03ce6d1bb8ab3926b3acc717418c474a49ade69 diff --git a/distribution/licenses/lucene-queries-5.2.1.jar.sha1 b/distribution/licenses/lucene-queries-5.2.1.jar.sha1 deleted file mode 100644 index 026e3a9032e..00000000000 --- a/distribution/licenses/lucene-queries-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bada7fe2251e097413a23eefc8c87d009dac24f diff --git a/distribution/licenses/lucene-queries-5.3.0.jar.sha1 b/distribution/licenses/lucene-queries-5.3.0.jar.sha1 new file mode 100644 index 00000000000..51486ac5c70 --- /dev/null +++ b/distribution/licenses/lucene-queries-5.3.0.jar.sha1 @@ -0,0 +1 @@ +a0e8ff0bb90fd762800afdd434fdf769b1f9ac28 diff --git a/distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 b/distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 deleted file mode 100644 index a2d8e2cc291..00000000000 --- a/distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73be0a2d4ab3e6b574be1938bfb27f7f730f0ad9 diff --git a/distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 b/distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 new file mode 100644 index 00000000000..f542844d20b --- /dev/null +++ b/distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 @@ -0,0 +1 @@ +2c5e08580316c90b56a52e3cb686e1cf69db3f9e diff --git a/distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 b/distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 deleted file mode 100644 index 3caf3072079..00000000000 --- a/distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d2355e5d8c95a4c3188ee2734a9f98829b2b10b diff --git a/distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 b/distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 new file mode 100644 index 00000000000..b1bf9194e10 --- /dev/null +++ 
b/distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 @@ -0,0 +1 @@ +152da54a3b1ea6e3e8648d767616a51857b66a8e diff --git a/distribution/licenses/lucene-spatial-5.2.1.jar.sha1 b/distribution/licenses/lucene-spatial-5.2.1.jar.sha1 deleted file mode 100644 index 20f07e938cb..00000000000 --- a/distribution/licenses/lucene-spatial-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec498e52fdfc8ab751d9712b04c76e26e75e5014 diff --git a/distribution/licenses/lucene-spatial-5.3.0.jar.sha1 b/distribution/licenses/lucene-spatial-5.3.0.jar.sha1 new file mode 100644 index 00000000000..6499667fa8e --- /dev/null +++ b/distribution/licenses/lucene-spatial-5.3.0.jar.sha1 @@ -0,0 +1 @@ +6d57880a0950416035112f4fcc725854c011b081 diff --git a/distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 new file mode 100644 index 00000000000..d1dd3219632 --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 @@ -0,0 +1 @@ +23cfd7c19ead7b6fc6b2921f9c490ad3d043770d diff --git a/distribution/licenses/lucene-suggest-5.2.1.jar.sha1 b/distribution/licenses/lucene-suggest-5.2.1.jar.sha1 deleted file mode 100644 index 12a585d32bc..00000000000 --- a/distribution/licenses/lucene-suggest-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0d62b25d52f9949b243c9cdb8a78830aa4944415 diff --git a/distribution/licenses/lucene-suggest-5.3.0.jar.sha1 b/distribution/licenses/lucene-suggest-5.3.0.jar.sha1 new file mode 100644 index 00000000000..dc59343223c --- /dev/null +++ b/distribution/licenses/lucene-suggest-5.3.0.jar.sha1 @@ -0,0 +1 @@ +a155fc16a20b11205f99603950025522b173edc9 diff --git a/distribution/pom.xml b/distribution/pom.xml index 4a22d12f458..94cd8ad75e7 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -69,6 +69,61 @@ httpclient test + + + + org.elasticsearch + elasticsearch + + + + org.apache.lucene + lucene-expressions + + + + com.spatial4j + spatial4j + + + + com.vividsolutions + jts + + + + + com.github.spullara.mustache.java + compiler + + + + org.codehaus.groovy + groovy-all + indy + + + + log4j + log4j + + + + log4j + apache-log4j-extras + + + + + + net.java.dev.jna + jna + @@ -170,8 +225,6 @@ - fully-loaded - shaded tar zip deb diff --git a/distribution/rpm/pom.xml b/distribution/rpm/pom.xml index 488ed97ac04..f0e22b64963 100644 --- a/distribution/rpm/pom.xml +++ b/distribution/rpm/pom.xml @@ -15,15 +15,6 @@ rpm The RPM distribution of Elasticsearch - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - true ${project.build.directory}/releases/ @@ -187,7 +178,6 @@ target/lib/ - ${project.build.finalName}-shaded.jar ${project.build.finalName}-sources.jar ${project.build.finalName}-tests.jar ${project.build.finalName}-test-sources.jar diff --git a/distribution/shaded/pom.xml b/distribution/shaded/pom.xml deleted file mode 100644 index 6a4b54f7b18..00000000000 --- a/distribution/shaded/pom.xml +++ /dev/null @@ -1,173 +0,0 @@ - - - 4.0.0 - - org.elasticsearch.distribution - distributions - 2.1.0-SNAPSHOT - - - org.elasticsearch.distribution.shaded - elasticsearch - Distribution: Shaded JAR - - - - org.elasticsearch - elasticsearch - ${elasticsearch.version} - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - false - true - - - - org.apache.maven.plugins - maven-shade-plugin - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - check-for-jar-hell - integration-test - - run - - - - - - - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - package - - shade - - - - - 
false - false - true - true - ${project.build.directory}/dependency-reduced-pom.xml - - - org.apache.lucene:* - com.spatial4j:* - - - - - - - true - - - - - - com.google.common - org.elasticsearch.common - - - com.google.thirdparty - org.elasticsearch.common.thirdparty - - - com.carrotsearch.hppc - org.elasticsearch.common.hppc - - - org.HdrHistogram - org.elasticsearch.common.HdrHistogram - - - org.yaml - org.elasticsearch.common.yaml - - - com.twitter.jsr166e - org.elasticsearch.common.util.concurrent.jsr166e - - - com.fasterxml.jackson - org.elasticsearch.common.jackson - - - org.joda.time - org.elasticsearch.common.joda.time - - - org.joda.convert - org.elasticsearch.common.joda.convert - - - org.jboss.netty - org.elasticsearch.common.netty - - - com.ning.compress - org.elasticsearch.common.compress - - - com.github.mustachejava - org.elasticsearch.common.mustache - - - com.tdunning.math.stats - org.elasticsearch.common.stats - - - org.apache.commons.lang - org.elasticsearch.common.lang - - - org.apache.commons.cli - org.elasticsearch.common.cli.commons - - - - - *:* - - META-INF/license/** - META-INF/* - META-INF/maven/** - LICENSE - NOTICE - /*.txt - build.properties - - - - - - - - - diff --git a/distribution/tar/pom.xml b/distribution/tar/pom.xml index 33181b281ab..744d7c924e7 100644 --- a/distribution/tar/pom.xml +++ b/distribution/tar/pom.xml @@ -19,15 +19,6 @@ The TAR distribution of Elasticsearch - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - ${project.basedir}/../src/main/packaging/packaging.properties diff --git a/distribution/zip/pom.xml b/distribution/zip/pom.xml index 750944f60dc..6bb38fbb579 100644 --- a/distribution/zip/pom.xml +++ b/distribution/zip/pom.xml @@ -19,15 +19,6 @@ The ZIP distribution of Elasticsearch - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - ${project.basedir}/../src/main/packaging/packaging.properties diff --git a/docs/java-api/docs/index_.asciidoc b/docs/java-api/docs/index_.asciidoc index 152d76e4a8c..2b29f15fabd 100644 --- a/docs/java-api/docs/index_.asciidoc +++ b/docs/java-api/docs/index_.asciidoc @@ -60,24 +60,8 @@ json.put("message","trying out Elasticsearch"); [[java-docs-index-generate-beans]] ===== Serialize your beans -Elasticsearch already uses Jackson but shades it under -`org.elasticsearch.common.jackson` package. + - So, you can add your own Jackson version in your `pom.xml` file or in -your classpath. See http://wiki.fasterxml.com/JacksonDownload[Jackson -Download Page]. - -For example: - -[source,xml] --------------------------------------------------- - - com.fasterxml.jackson.core - jackson-databind - 2.1.3 - --------------------------------------------------- - -Then, you can start serializing your beans to JSON: +Elasticsearch already uses http://wiki.fasterxml.com/JacksonHome[Jackson]. 
+So you can use it to serialize your beans to JSON: [source,java] -------------------------------------------------- diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index 6db8c82a9e8..639fabb62fd 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -79,7 +79,16 @@ The above example can also be specified using file scripts as follows: <1> script parameters for init, map and combine scripts must be specified in a global `params` object so that it can be share between the scripts -For more details on specifying scripts see <>. +For more details on specifying scripts see <>. + +==== Allowed return types + +Whilst any valid script object can be used within a single script, the scripts must return or store in the `_agg` object only the following types: + +* primitive types +* String +* Map (containing only keys and values of the types listed here) +* Array (containing elements of only the types listed here) ==== Scope of scripts diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index b31fda65ca3..670ed6266b0 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -18,9 +18,9 @@ _Sibling_:: Pipeline aggregations that are provided with the output of a sibling aggregation and are able to compute a new aggregation which will be at the same level as the sibling aggregation. -Pipeline aggregations can reference the aggregations they need to perform their computation by using the `buckets_paths` +Pipeline aggregations can reference the aggregations they need to perform their computation by using the `buckets_path` parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the -<<bucket-path-syntax>> section below. +<<buckets-path-syntax>> section below. Pipeline aggregations cannot have sub-aggregations but depending on the type it can reference another pipeline in the `buckets_path` allowing pipeline aggregations to be chained. For example, you can chain together two derivatives to calculate the second derivative @@ -29,7 +29,7 @@ allowing pipeline aggregations to be chained. For example, you can chain togeth NOTE: Because pipeline aggregations only add to the output, when chaining pipeline aggregations the output of each pipeline aggregation will be included in the final output. -[[bucket-path-syntax]] +[[buckets-path-syntax]] [float] === `buckets_path` Syntax @@ -96,13 +96,13 @@ a metric embedded inside a sibling aggregation: }, "max_monthly_sales": { "max_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the +<1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the `sales_per_month` date histogram. 
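To make the "Allowed return types" list in the scripted-metric hunk above concrete, here is a minimal sketch of a scripted metric whose scripts keep to the permitted types; the index field (`amount`) and the inline Groovy are illustrative assumptions, not part of this patch:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "profit": {
            "scripted_metric": {
                "init_script": "_agg.transactions = []",
                "map_script": "_agg.transactions.add(doc.amount.value)",
                "combine_script": "total = 0; for (t in _agg.transactions) { total += t }; return total",
                "reduce_script": "total = 0; for (t in _aggs) { total += t }; return total"
            }
        }
    }
}
--------------------------------------------------

Each stage stays within the allowed types: `_agg.transactions` is an Array of primitives, and the combine and reduce scripts each return a primitive.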
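The chaining described above, where two derivatives are combined to compute a second derivative, can be sketched with the corrected `buckets_path` spelling; the aggregation and field names (`sales_per_month`, `sales_deriv`, `sales_2nd_deriv`, `price`) are illustrative, not part of this patch:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "sales_per_month": {
            "date_histogram": {
                "field": "date",
                "interval": "month"
            },
            "aggs": {
                "sales": {
                    "sum": { "field": "price" }
                },
                "sales_deriv": {
                    "derivative": { "buckets_path": "sales" } <1>
                },
                "sales_2nd_deriv": {
                    "derivative": { "buckets_path": "sales_deriv" } <2>
                }
            }
        }
    }
}
--------------------------------------------------
<1> The first derivative points at the sibling `sales` metric.
<2> The second derivative chains onto the first by referencing its aggregation name.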
[float] diff --git a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc index bbd540dc01f..b2b9d93f767 100644 --- a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc @@ -24,7 +24,7 @@ An `avg_bucket` aggregation looks like this in isolation: .`avg_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the average for (see <<bucket-path-syntax>> for more +|`buckets_path` |The path to the buckets we wish to find the average for (see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` || diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc index 6c790403af0..72addadaefa 100644 --- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc @@ -34,7 +34,7 @@ the metrics to use for that variable. |`script` |The script to run for this aggregation. The script can be inline, file or indexed. (see <<modules-scripting>> for more details) |Required | |`buckets_path` |A map of script variables and their associated path to the buckets we wish to use for the variable -(see <<bucket-path-syntax>> for more details) |Required | +(see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` | |`format` |format to apply to the output value of this aggregation |Optional, defaults to `null` | @@ -73,7 +73,7 @@ The following snippet calculates the ratio percentage of t-shirt sales compared }, "t-shirt-percentage": { "bucket_script": { - "buckets_paths": { + "buckets_path": { "tShirtSales": "t-shirts>sales", "totalSales": "total_sales" }, diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index 7ac4f66dba4..2b838ba45fb 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -39,7 +39,7 @@ the metrics to use for that variable. |`script` |The script to run for this aggregation. The script can be inline, file or indexed. 
(see <<modules-scripting>> for more details) |Required | |`buckets_path` |A map of script variables and their associated path to the buckets we wish to use for the variable -(see <<bucket-path-syntax>> for more details) |Required | +(see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` | |=== @@ -63,7 +63,7 @@ The following snippet only retains buckets where the total sales for the month i } "sales_bucket_filter": { "bucket_selector": { - "buckets_paths": { + "buckets_path": { "totalSales": "total_sales" }, "script": "totalSales <= 50" diff --git a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc index 88fcd83831e..823c5c80d6d 100644 --- a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc @@ -25,7 +25,7 @@ A `cumulative_sum` aggregation looks like this in isolation: .`cumulative_sum` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the cumulative sum for (see <<bucket-path-syntax>> for more +|`buckets_path` |The path to the buckets we wish to find the cumulative sum for (see <<buckets-path-syntax>> for more details) |Required | |`format` |format to apply to the output value of this aggregation |Optional, defaults to `null` | |=== @@ -49,7 +49,7 @@ The following snippet calculates the cumulative sum of the total monthly `sales` }, "cumulative_sales": { "cumulative_sum": { - "buckets_paths": "sales" <1> + "buckets_path": "sales" <1> } } } @@ -58,7 +58,7 @@ The following snippet calculates the cumulative sum of the total monthly `sales` } -------------------------------------------------- -<1> `bucket_paths` instructs this cumulative sum aggregation to use the output of the `sales` aggregation for the cumulative sum +<1> `buckets_path` instructs this cumulative sum aggregation to use the output of the `sales` aggregation for the cumulative sum And the following may be the response: diff --git a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc index ec63600a321..48296caf608 100644 --- a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc @@ -25,7 +25,7 @@ A `derivative` aggregation looks like this in isolation: .`derivative` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the derivative for (see <<bucket-path-syntax>> for more +|`buckets_path` |The path to the buckets we wish to find the derivative for (see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` | diff --git a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc index 0d15cc02e2a..310a643a66c 100644 --- a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc @@ -25,7 +25,7 @@ A `max_bucket` aggregation looks like this in isolation: .`max_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the maximum for (see <<bucket-path-syntax>> for more 
+|`buckets_path` |The path to the buckets we wish to find the maximum for (see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` | @@ -53,13 +53,13 @@ The following snippet calculates the maximum of the total monthly `sales`: }, "max_monthly_sales": { "max_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the +<1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the `sales_per_month` date histogram. And the following may be the response: diff --git a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc index ed02f7b2051..11d3d559512 100644 --- a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc @@ -25,7 +25,7 @@ A `max_bucket` aggregation looks like this in isolation: .`min_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the minimum for (see <<bucket-path-syntax>> for more +|`buckets_path` |The path to the buckets we wish to find the minimum for (see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` | @@ -54,14 +54,14 @@ The following snippet calculates the minimum of the total monthly `sales`: }, "min_monthly_sales": { "min_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this max_bucket aggregation that we want the minimum value of the `sales` aggregation in the +<1> `buckets_path` instructs this min_bucket aggregation that we want the minimum value of the `sales` aggregation in the `sales_per_month` date histogram. And the following may be the response: diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index b7c86d5826c..6fe91cb45c6 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -40,7 +40,7 @@ A `moving_avg` aggregation looks like this in isolation: .`moving_avg` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |Path to the metric of interest (see <<bucket-path-syntax>> for more details |Required | +|`buckets_path` |Path to the metric of interest (see <<buckets-path-syntax>> for more details) |Required | |`model` |The moving average weighting model that we wish to use |Optional |`simple` |`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` |`window` |The size of window to "slide" across the histogram. |Optional |`5` @@ -78,7 +78,7 @@ embedded like any other metric aggregation: Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram. 
The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see -<<bucket-path-syntax>> for a description of the syntax for `buckets_path`. +<<buckets-path-syntax>> for a description of the syntax for `buckets_path`). ==== Models diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc index 84283bd9f3f..7193510bf1a 100644 --- a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -52,7 +52,7 @@ A `serial_diff` aggregation looks like this in isolation: .`moving_avg` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |Path to the metric of interest (see <<bucket-path-syntax>> for more details |Required | +|`buckets_path` |Path to the metric of interest (see <<buckets-path-syntax>> for more details) |Required | |`lag` |The historical bucket to subtract from the current value. E.g. a lag of 7 will subtract the current value from the value 7 buckets ago. Must be a positive, non-zero integer |Optional |`1` |`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` @@ -94,7 +94,7 @@ A `serial_diff` aggregation looks like this in isolation: Serial differences are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `serial_diff` is embedded inside the histogram. The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see -<<bucket-path-syntax>> for a description of the syntax for `buckets_path`. +<<buckets-path-syntax>> for a description of the syntax for `buckets_path`). diff --git a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc index 3729056d783..56d786f59f0 100644 --- a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc @@ -24,7 +24,7 @@ A `sum_bucket` aggregation looks like this in isolation: .`sum_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the sum for (see <<bucket-path-syntax>> for more +|`buckets_path` |The path to the buckets we wish to find the sum for (see <<buckets-path-syntax>> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <<gap-policy>> for more details)|Optional, defaults to `skip` || @@ -52,13 +52,13 @@ The following snippet calculates the sum of all the total monthly `sales` bucket }, "sum_monthly_sales": { "sum_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this sum_bucket aggregation that we want the sum of the `sales` aggregation in the +<1> `buckets_path` instructs this sum_bucket aggregation that we want the sum of the `sales` aggregation in the `sales_per_month` date histogram. 
And the following may be the response: diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 69388ffa7a1..5d849807bc7 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -25,6 +25,7 @@ following types are supported: <>, <>, <>, +<>, <>, <>, <>, @@ -56,8 +57,9 @@ with the `keywords` set to the value of the `stem_exclusion` parameter. The following analyzers support setting custom `stem_exclusion` list: `arabic`, `armenian`, `basque`, `catalan`, `bulgarian`, `catalan`, `czech`, `finnish`, `dutch`, `english`, `finnish`, `french`, `galician`, -`german`, `irish`, `hindi`, `hungarian`, `indonesian`, `italian`, `latvian`, `norwegian`, -`portuguese`, `romanian`, `russian`, `sorani`, `spanish`, `swedish`, `turkish`. +`german`, `irish`, `hindi`, `hungarian`, `indonesian`, `italian`, `latvian`, +`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`, +`spanish`, `swedish`, `turkish`. ==== Reimplementing language analyzers @@ -1082,6 +1084,50 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows: <2> This filter should be removed unless there are words which should be excluded from stemming. +[[lithuanian-analyzer]] +===== `lithuanian` analyzer + +The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows: + +[source,js] +---------------------------------------------------- +{ + "settings": { + "analysis": { + "filter": { + "lithuanian_stop": { + "type": "stop", + "stopwords": "_lithuanian_" <1> + }, + "lithuanian_keywords": { + "type": "keyword_marker", + "keywords": [] <2> + }, + "lithuanian_stemmer": { + "type": "stemmer", + "language": "lithuanian" + } + }, + "analyzer": { + "lithuanian": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "lithuanian_stop", + "lithuanian_keywords", + "lithuanian_stemmer" + ] + } + } + } + } +} +---------------------------------------------------- +<1> The default stopwords can be overridden with the `stopwords` + or `stopwords_path` parameters. +<2> This filter should be removed unless there are words which should + be excluded from stemming. + [[norwegian-analyzer]] ===== `norwegian` analyzer diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index 58d88988745..6042642027c 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -5,8 +5,8 @@ A filter that stems words using a Snowball-generated stemmer. The `language` parameter controls the stemmer with the following available values: `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`, `Finnish`, `French`, `German`, `German2`, `Hungarian`, `Italian`, `Kp`, -`Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, `Russian`, -`Spanish`, `Swedish`, `Turkish`. +`Lithuanian`, `Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, +`Russian`, `Spanish`, `Swedish`, `Turkish`. 
For example: diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index 3d83b2044d9..548342c521b 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -133,6 +133,10 @@ Latvian:: http://lucene.apache.org/core/4_9_0/analyzers-common/org/apache/lucene/analysis/lv/LatvianStemmer.html[*`latvian`*] +Lithuanian:: + +http://svn.apache.org/viewvc/lucene/dev/branches/lucene_solr_5_3/lucene/analysis/common/src/java/org/apache/lucene/analysis/lt/stem_ISO_8859_1.sbl?view=markup[*`lithuanian`*] + Norwegian (Bokmål):: http://snowball.tartarus.org/algorithms/norwegian/stemmer.html[*`norwegian`*], diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 1425bfee1ee..b22312e2130 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -39,7 +39,7 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`, pools, number of loaded/unloaded classes `os`:: - Operating system stats, load average, cpu, mem, swap + Operating system stats, load average, mem, swap (see <>) `process`:: diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 075c460321f..3a1c2de1385 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -68,6 +68,12 @@ corruption is detected, it will prevent the shard from being opened. Accepts: Checking shards may take a lot of time on large indices. -- +[[index-codec]] `index.codec`:: + + experimental[] The `default` value compresses stored data with LZ4 + compression, but this can be set to `best_compression` for a higher + compression ratio, at the expense of slower stored fields performance. + [float] [[dynamic-index-settings]] === Dynamic index settings @@ -92,12 +98,6 @@ specific index module: index visible to search. Defaults to `1s`. Can be set to `-1` to disable refresh. -[[index-codec]] `index.codec`:: - - experimental[] The `default` value compresses stored data with LZ4 - compression, but this can be set to `best_compression` for a higher - compression ratio, at the expense of slower stored fields performance. - `index.blocks.read_only`:: Set to `true` to make the index and index metadata read only, `false` to @@ -152,10 +152,6 @@ Other index settings are available in index modules: Enable or disable dynamic mapping for an index. -<>:: - - Control over how shards are merged by the background merge process. - <>:: Configure custom similarity settings to customize how search results are @@ -181,8 +177,6 @@ include::index-modules/allocation.asciidoc[] include::index-modules/mapper.asciidoc[] -include::index-modules/merge.asciidoc[] - include::index-modules/similarity.asciidoc[] include::index-modules/slowlog.asciidoc[] diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc deleted file mode 100644 index ac6de1d3a9e..00000000000 --- a/docs/reference/index-modules/merge.asciidoc +++ /dev/null @@ -1,113 +0,0 @@ -[[index-modules-merge]] -== Merge - -experimental[All of the settings exposed in the `merge` module are expert only and may be removed in the future] - -A shard in elasticsearch is a Lucene index, and a Lucene index is broken -down into segments. 
Segments are internal storage elements in the index -where the index data is stored, and are immutable up to delete markers. -Segments are, periodically, merged into larger segments to keep the -index size at bay and expunge deletes. - -Merges segments of approximately equal size, subject to an allowed -number of segments per tier. The merge policy is able to merge -non-adjacent segments, and separates how many segments are merged at once from how many -segments are allowed per tier. It also does not over-merge (i.e., cascade merges). - -[float] -[[merge-settings]] -=== Merge policy settings - -All merge policy settings are _dynamic_ and can be updated on a live index. -The merge policy has the following settings: - -`index.merge.policy.expunge_deletes_allowed`:: - - When expungeDeletes is called, we only merge away a segment if its delete - percentage is over this threshold. Default is `10`. - -`index.merge.policy.floor_segment`:: - - Segments smaller than this are "rounded up" to this size, i.e. treated as - equal (floor) size for merge selection. This is to prevent frequent - flushing of tiny segments, thus preventing a long tail in the index. Default - is `2mb`. - -`index.merge.policy.max_merge_at_once`:: - - Maximum number of segments to be merged at a time during "normal" merging. - Default is `10`. - -`index.merge.policy.max_merge_at_once_explicit`:: - - Maximum number of segments to be merged at a time, during optimize or - expungeDeletes. Default is `30`. - -`index.merge.policy.max_merged_segment`:: - - Maximum sized segment to produce during normal merging (not explicit - optimize). This setting is approximate: the estimate of the merged segment - size is made by summing sizes of to-be-merged segments (compensating for - percent deleted docs). Default is `5gb`. - -`index.merge.policy.segments_per_tier`:: - - Sets the allowed number of segments per tier. Smaller values mean more - merging but fewer segments. Default is `10`. Note, this value needs to be - >= than the `max_merge_at_once` otherwise you'll force too many merges to - occur. - -`index.merge.policy.reclaim_deletes_weight`:: - - Controls how aggressively merges that reclaim more deletions are favored. - Higher values favor selecting merges that reclaim deletions. A value of - `0.0` means deletions don't impact merge selection. Defaults to `2.0`. - -For normal merging, the policy first computes a "budget" of how many -segments are allowed to be in the index. If the index is over-budget, -then the policy sorts segments by decreasing size (proportionally considering percent -deletes), and then finds the least-cost merge. Merge cost is measured by -a combination of the "skew" of the merge (size of largest seg divided by -smallest seg), total merge size and pct deletes reclaimed, so that -merges with lower skew, smaller size and those reclaiming more deletes, -are favored. - -If a merge will produce a segment that's larger than -`max_merged_segment` then the policy will merge fewer segments (down to -1 at once, if that one has deletions) to keep the segment size under -budget. - -Note, this can mean that for large shards that holds many gigabytes of -data, the default of `max_merged_segment` (`5gb`) can cause for many -segments to be in an index, and causing searches to be slower. Use the -indices segments API to see the segments that an index has, and -possibly either increase the `max_merged_segment` or issue an optimize -call for the index (try and aim to issue it on a low traffic time). 
- -[float] -[[merge-scheduling]] -=== Merge scheduling - -The merge scheduler (ConcurrentMergeScheduler) controls the execution of -merge operations once they are needed (according to the merge policy). Merges -run in separate threads, and when the maximum number of threads is reached, -further merges will wait until a merge thread becomes available. - -The merge scheduler supports the following _dynamic_ settings: - -`index.merge.scheduler.max_thread_count`:: - - The maximum number of threads that may be merging at once. Defaults to - `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))` - which works well for a good solid-state-disk (SSD). If your index is on - spinning platter drives instead, decrease this to 1. - -`index.merge.scheduler.auto_throttle`:: - - If this is true (the default), then the merge scheduler will rate-limit IO - (writes) for merges to an adaptive value depending on how many merges are - requested over time. An application with a low indexing rate that - unluckily suddenly requires a large merge will see that merge aggressively - throttled, while an application doing heavy indexing will see the throttle - move higher to allow merges to keep up with ongoing indexing. - diff --git a/docs/reference/mapping/params/position-increment-gap.asciidoc b/docs/reference/mapping/params/position-increment-gap.asciidoc index 918e3d493a5..16a963c439e 100644 --- a/docs/reference/mapping/params/position-increment-gap.asciidoc +++ b/docs/reference/mapping/params/position-increment-gap.asciidoc @@ -4,12 +4,12 @@ <> string fields take term <> into account, in order to be able to support <>. -When indexing an array of strings, each string of the array is indexed -directly after the previous one, almost as though all the strings in the array -had been concatenated into one big string. +When indexing string fields with multiple values a "fake" gap is added between +the values to prevent most phrase queries from matching across the values. The +size of this gap is configured using `position_increment_gap` and defaults to +`100`. -This can result in matches from phrase queries spanning two array elements. -For instance: +For example: [source,js] -------------------------------------------------- @@ -26,11 +26,26 @@ GET /my_index/groups/_search } } } + +GET /my_index/groups/_search +{ + "query": { + "match_phrase": { + "names": { + "query": "Abraham Lincoln", + "slop": 101 <2> + } + } + } +} -------------------------------------------------- // AUTOSENSE -<1> This phrase query matches our document, even though `Abraham` and `Lincoln` are in separate strings. +<1> This phrase query doesn't match our document, which is totally expected. +<2> This phrase query matches our document, even though `Abraham` and `Lincoln` + are in separate strings, because `slop` > `position_increment_gap`. - -The `position_increment_gap` can introduce a fake gap between each array element. For instance: + +The `position_increment_gap` can be specified in the mapping. For instance: [source,js] -------------------------------------------------- @@ -41,7 +56,7 @@ PUT my_index "properties": { "names": { "type": "string", - "position_increment_gap": 50 <1> + "position_increment_gap": 0 <1> } } } @@ -63,11 +78,11 @@ GET /my_index/groups/_search } -------------------------------------------------- // AUTOSENSE -<1> The first term in the next array element will be 50 terms apart from the +<1> The first term in the next array element will be 0 terms apart from the last term in the previous array element. 
-<2> The phrase query no longer matches our document. +<2> The phrase query matches our document, which is weird, but it's what we asked + for in the mapping. TIP: The `position_increment_gap` setting is allowed to differ for fields of the same name in the same index. Its value can be updated on existing fields using the <>. - diff --git a/docs/reference/migration/migrate_2_1.asciidoc b/docs/reference/migration/migrate_2_1.asciidoc index 63092d9250e..a530fc1193d 100644 --- a/docs/reference/migration/migrate_2_1.asciidoc +++ b/docs/reference/migration/migrate_2_1.asciidoc @@ -35,3 +35,10 @@ We've switched the default value of the `detect_noop` option from `false` to source unless you explicitly set `"detect_noop": false`. `detect_noop` was always computationally cheap compared to the expense of the update which can be thought of as a delete operation followed by an index operation. + +=== Removed features + +==== `indices.fielddata.cache.expire` + +The experimental feature `indices.fielddata.cache.expire` has been removed. +For indices that have this setting configured, it will now be ignored. \ No newline at end of file diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index 09b504529db..9baf8a379fb 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -5,7 +5,7 @@ Elasticsearch factors in the available disk space on a node before deciding whether to allocate new shards to that node or to actively relocate shards away from that node. -Below are the settings that can be configred in the `elasticsearch.yml` config +Below are the settings that can be configured in the `elasticsearch.yml` config file or updated dynamically on a live cluster with the <> API: @@ -67,3 +67,10 @@ PUT /_cluster/settings -------------------------------------------------- // AUTOSENSE +NOTE: Prior to 2.0.0, when using multiple data paths, the disk threshold +decider only factored in the total usage across all data paths (if you had two +data paths, one with 50b used out of 100b (50% used) and another with +40b used out of 50b (80% used), it would see the node's disk usage as 90b +out of 150b). In 2.0.0, the minimum and maximum disk usages are tracked +separately. + diff --git a/docs/reference/modules/indices/fielddata.asciidoc b/docs/reference/modules/indices/fielddata.asciidoc index eda1ff48e79..e8c8a8d2c49 100644 --- a/docs/reference/modules/indices/fielddata.asciidoc +++ b/docs/reference/modules/indices/fielddata.asciidoc @@ -18,12 +18,6 @@ and perform poorly. absolute value, eg `12GB`. Defaults to unbounded. Also see <>. -`indices.fielddata.cache.expire`:: - - experimental[] A time based setting that expires field data after a - certain time of inactivity. Defaults to `-1`. For example, can be set to - `5m` for a 5 minute expiry. - NOTE: These are static settings which must be configured on every data node in the cluster. diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index f836f66bcfe..1c92eb24e4c 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -43,6 +43,9 @@ time setting format). Defaults to `30s`. |`transport.tcp.compress` |Set to `true` to enable compression (LZF) between all nodes. Defaults to `false`. + +|`transport.ping_schedule` | Schedule a regular ping message to ensure that connections are kept alive.
Defaults to `5s` in the transport client and `-1` (disabled) elsewhere. + |======================================================================= It also uses the common diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 2ad1f57388e..29214415bbf 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -115,7 +115,7 @@ process all data -- it just needs to be long enough to process the previous batch of results. Each `scroll` request (with the `scroll` parameter) sets a new expiry time. -Normally, the <> optimizes the +Normally, the background merge process optimizes the index by merging together smaller segments to create new bigger segments, at which time the smaller segments are deleted. This process continues during scrolling, but an open search context prevents the old segments from being diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 3b4f70f3e89..ee8969b95e5 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -136,8 +136,8 @@ payloads or weights. This form does still work inside of multi fields. NOTE: The suggest data structure might not reflect deletes on documents immediately. You may need to do an <> for that. -You can call optimize with the `only_expunge_deletes=true` to only cater for deletes -or alternatively call a <> operation. +You can call optimize with `only_expunge_deletes=true` to target only +deletions for merging. [[querying]] ==== Querying diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 deleted file mode 100644 index b98bcd59656..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b80ddba6b892937f3e9b8321fa6cef5cdd0fadfa diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 new file mode 100644 index 00000000000..393ebc59ee0 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 @@ -0,0 +1 @@ +e6dd489db555ad84279732c5f189406d20b63c84 diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 deleted file mode 100644 index 0553975c25a..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd348c9f18662b73d22427209075a739ad33d689 diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 new file mode 100644 index 00000000000..b9e01cd40fd --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 @@ -0,0 +1 @@ +b3e67473646e3869fcdeb4a3151ab597b957fbf2 diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 deleted file mode 100644 index ccd7a4db633..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf61aebd9d895884dcac77e7ed17e45683ddbd66 diff --git
a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 new file mode 100644 index 00000000000..1008732a647 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 @@ -0,0 +1 @@ +471f3ee15053413e75c5c24a978494a6d4984240 diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 deleted file mode 100644 index c514c619955..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81a9e3dc63fb3661cccfa7e90e1b38535e895933 diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 new file mode 100644 index 00000000000..34377b92824 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 @@ -0,0 +1 @@ +e37000b73d34ba33dda26f46893b09ba275c5294 diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 deleted file mode 100644 index f542650fccf..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf939976dc70ccab9a9ba40bde58c247afb72d99 diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 new file mode 100644 index 00000000000..6c2857f65b1 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 @@ -0,0 +1 @@ +fcc4bf8ccbda52435d13525d7cfc66cecf5c5125 diff --git a/pom.xml b/pom.xml index b4fa4af2da0..0cc34ed578f 100644 --- a/pom.xml +++ b/pom.xml @@ -41,18 +41,17 @@ 1.7 - 5.2.1 - 5.2.1 + 5.3.0 + 5.3.0 2.1.16 2.5.3 1.6.2 1.2.17 - 0.7.2.201409121644 + 0.7.5.201505241946 s3://download.elasticsearch.org/elasticsearch/staging/ ${project.build.directory}/dev-tools - unshaded ${elasticsearch.tools.directory}/license-check/elasticsearch_license_header.txt ${elasticsearch.tools.directory}/license-check/license_header_definition.xml ${elasticsearch.tools.directory}/ant/integration-tests.xml @@ -336,7 +335,6 @@ ${lucene.maven.version} - com.google.guava guava @@ -416,8 +414,6 @@ 1.3.1 - - org.codehaus.groovy groovy-all @@ -543,6 +539,19 @@ + + enforce-maven-version + + enforce + + + + + [3.1.0,) + + + + print-versions validate @@ -633,6 +642,8 @@ ${tests.showSuccess} ${tests.thirdparty} ${tests.config} + ${tests.coverage} + ${project.build.directory} ${tests.client.ratio} ${tests.enable_mock_modules} ${tests.assertion.disabled} @@ -831,7 +842,7 @@ ${elasticsearch.tools.directory}/forbidden/core-signatures.txt ${elasticsearch.tools.directory}/forbidden/all-signatures.txt - ${elasticsearch.tools.directory}/forbidden/third-party-${elasticsearch.thirdparty.config}-signatures.txt + ${elasticsearch.tools.directory}/forbidden/third-party-signatures.txt ${forbidden.signatures} **.SuppressForbidden @@ -878,7 +889,7 @@ org.apache.maven.plugins maven-resources-plugin 2.7 - @@ -1092,12 +1103,6 @@ org.eclipse.jdt.ui.text.custom_code_templates=report - - default-check - - check - - @@ -1408,7 +1413,8 @@ org.eclipse.jdt.ui.text.custom_code_templates= - false + + true diff --git a/qa/pom.xml b/qa/pom.xml index d918de924ae..f8b1f38f6bd 100644 --- a/qa/pom.xml +++ 
b/qa/pom.xml @@ -146,7 +146,6 @@ smoke-test-plugins - smoke-test-shaded smoke-test-multinode diff --git a/qa/smoke-test-shaded/pom.xml b/qa/smoke-test-shaded/pom.xml deleted file mode 100644 index c1eb2e72e36..00000000000 --- a/qa/smoke-test-shaded/pom.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - 4.0.0 - - - org.elasticsearch.qa - elasticsearch-qa - 2.1.0-SNAPSHOT - - - smoke-test-shaded - QA: Smoke Test Shaded Jar - Runs a simple - - - shaded - true - - - - - org.elasticsearch.distribution.shaded - elasticsearch - ${elasticsearch.version} - - - org.hamcrest - hamcrest-all - test - - - org.apache.lucene - lucene-test-framework - test - - - diff --git a/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java b/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java deleted file mode 100644 index 0befbf440fa..00000000000 --- a/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.shaded.test; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeBuilder; -import org.junit.Test; - -import java.nio.file.Path; - -/** - */ -public class ShadedIT extends LuceneTestCase { - - public void testStartShadedNode() { - ESLoggerFactory.getRootLogger().setLevel("ERROR"); - Path data = createTempDir(); - Settings settings = Settings.builder() - .put("path.home", data.toAbsolutePath().toString()) - .put("node.mode", "local") - .put("http.enabled", "false") - .build(); - NodeBuilder builder = NodeBuilder.nodeBuilder().data(true).settings(settings).loadConfigSettings(false).local(true); - try (Node node = builder.node()) { - Client client = node.client(); - client.admin().indices().prepareCreate("test").get(); - client.prepareIndex("test", "foo").setSource("{ \"field\" : \"value\" }").get(); - client.admin().indices().prepareRefresh().get(); - SearchResponse response = client.prepareSearch("test").get(); - assertEquals(response.getHits().getTotalHits(), 1l); - } - - } - - @Test - public void testLoadShadedClasses() throws ClassNotFoundException { - Class.forName("org.elasticsearch.common.collect.ImmutableList"); - Class.forName("org.elasticsearch.common.joda.time.DateTime"); - Class.forName("org.elasticsearch.common.util.concurrent.jsr166e.LongAdder"); - } - - @Test(expected = ClassNotFoundException.class) - public void testGuavaIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("com.google.common.collect.ImmutableList"); - } - - @Test(expected = ClassNotFoundException.class) - public void testJodaIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("org.joda.time.DateTime"); - } - - @Test(expected = ClassNotFoundException.class) - public void testjsr166eIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("com.twitter.jsr166e.LongAdder"); - } -} diff --git a/qa/vagrant/pom.xml b/qa/vagrant/pom.xml index 8f3b8ade0f8..ed9422b64e2 100644 --- a/qa/vagrant/pom.xml +++ b/qa/vagrant/pom.xml @@ -98,6 +98,90 @@ ${elasticsearch.version} zip + + org.elasticsearch.plugin + analysis-icu + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-kuromoji + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-phonetic + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-smartcn + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-stempel + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + cloud-aws + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + cloud-azure + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + cloud-gce + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + delete-by-query + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + lang-javascript + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + lang-python + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + mapper-size + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + mapper-murmur3 + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + site-example + ${elasticsearch.version} + zip + @@ -192,14 +276,6 @@ - - - org.elasticsearch.distribution - elasticsearch-rpm - ${elasticsearch.version} - rpm - - ok 
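The dependencies added above stage each plugin's zip next to the elasticsearch distributions so the packaging tests can install them inside the VMs without network access. Roughly, a test then picks up a staged zip like this (a sketch only; the working directory and exact filenames depend on how the build staged the artifacts):

-------------------------------------------------------------------
# Resolve the staged zip; the filename carries the version being built.
zip=$(ls analysis-icu-*.zip)
sudo /usr/share/elasticsearch/bin/plugin install "file://$PWD/$zip"
-------------------------------------------------------------------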
diff --git a/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml b/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml index efed4960b5e..67b208803b9 100644 --- a/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml +++ b/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml @@ -68,6 +68,13 @@ That isn't to say that the updates will always be compatible. It's ok to just destroy the boxes if they get busted. --> + + + diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats index 61210d2df68..a4332ce4eb1 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats @@ -30,6 +30,7 @@ # Load test utilities load packaging_test_utils +load tar setup() { skip_not_tar_gz @@ -61,13 +62,12 @@ setup() { count=$(find /tmp -type d -name 'elasticsearch*' | wc -l) [ "$count" -eq 1 ] -} -################################## -# Check that the archive is correctly installed -################################## -@test "[TAR] verify archive installation" { - verify_archive_installation "/tmp/elasticsearch" + # It's simpler to check that the install was correct in this test rather + # than in another test because install_archive sets a number of path + # variables that verify_archive_installation reads. To separate this into + # another test you'd have to recreate the variables. + verify_archive_installation } ################################## diff --git a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats deleted file mode 100644 index 2b8fe631cec..00000000000 --- a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the installation and removal -# of plugins with a tar gz archive. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It removes the 'elasticsearch' -# user/group and also many directories. Do not execute this file -# unless you know exactly what you are doing. - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
- -# Load test utilities -load packaging_test_utils - -setup() { - # Cleans everything for every test execution - clean_before_test -} - -################################## -# Install plugins with a tar archive -################################## -@test "[TAR] install jvm-example plugin" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - echo "Running jvm-example's bin script...." - /tmp/elasticsearch/bin/jvm-example/test | grep test - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" -} - -@test "[TAR] install jvm-example plugin with a custom path.plugins" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Creates a temporary directory - TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Modify the path.plugins setting in configuration file - echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/tmp/elasticsearch/config/elasticsearch.yml" - - run chown -R elasticsearch:elasticsearch "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/plugin-descriptor.properties" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_not_exist "$TEMP_PLUGINS_DIR/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] -} - -@test "[TAR] install jvm-example plugin with a custom CONFIG_DIR" { - # Install the archive - install_archive - - 
# Checks that the archive is correctly installed - verify_archive_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Move configuration files to the new configuration directory - run mv /tmp/elasticsearch/config/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - run chown -R elasticsearch:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example with the CONF_DIR environment variable - run env "CONF_DIR=$TEMP_CONFIG_DIR" /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} - -@test "[TAR] install jvm-example plugin with a custom ES_JAVA_OPTS" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Move configuration files to the new configuration directory - run mv /tmp/elasticsearch/config/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - run chown -R elasticsearch:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Export ES_JAVA_OPTS - export ES_JAVA_OPTS="-Des.path.conf=$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist 
"/tmp/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} - -@test "[TAR] install jvm-example plugin to elasticsearch directory with a space" { - export ES_DIR="/tmp/elastic search" - - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Move the Elasticsearch installation to a directory with a space in it - rm -rf "$ES_DIR" - mv /tmp/elasticsearch "$ES_DIR" - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run "$ES_DIR/bin/plugin" install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "$ES_DIR/bin/jvm-example" - assert_file_exist "$ES_DIR/bin/jvm-example/test" - assert_file_exist "$ES_DIR/config/jvm-example" - assert_file_exist "$ES_DIR/config/jvm-example/example.yaml" - assert_file_exist "$ES_DIR/plugins/jvm-example" - assert_file_exist "$ES_DIR/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "$ES_DIR/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run "$ES_DIR/bin/plugin" remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "$ES_DIR/bin/jvm-example" - assert_file_exist "$ES_DIR/config/jvm-example" - assert_file_exist "$ES_DIR/config/jvm-example/example.yaml" - assert_file_not_exist "$ES_DIR/plugins/jvm-example" - - #Cleanup our temporary Elasticsearch installation - rm -rf "$ES_DIR" -} - -@test "[TAR] install jvm-example plugin from a directory with a space" { - export EXAMPLE_PLUGIN_ZIP_WITH_SPACE="/tmp/plugins with space/jvm-example.zip" - - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Copy the jvm-example plugin to a directory with a space in it - rm -f "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" - mkdir -p "$(dirname "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE")" - cp $EXAMPLE_PLUGIN_ZIP "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" - - #Cleanup our plugin directory with a space - rm -rf "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" -} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats new file mode 
120000 index 00000000000..8f55b1eb78c --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats @@ -0,0 +1 @@ +plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats index aa7a370d80b..3367e62fcf8 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats @@ -34,50 +34,39 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + skip_not_dpkg } ################################## # Install DEB package ################################## @test "[DEB] dpkg command is available" { - skip_not_dpkg - run dpkg --version - [ "$status" -eq 0 ] + clean_before_test + dpkg --version } @test "[DEB] package is available" { - skip_not_dpkg count=$(find . -type f -name 'elastic*.deb' | wc -l) [ "$count" -eq 1 ] } @test "[DEB] package is not installed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 + run dpkg -s 'elasticsearch' [ "$status" -eq 1 ] } @test "[DEB] install package" { - skip_not_dpkg - run dpkg -i elasticsearch*.deb >&2 - [ "$status" -eq 0 ] + dpkg -i elasticsearch*.deb } @test "[DEB] package is installed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 - [ "$status" -eq 0 ] + dpkg -s 'elasticsearch' } ################################## # Check that the package is correctly installed ################################## @test "[DEB] verify package installation" { - skip_not_dpkg - verify_package_installation } @@ -85,8 +74,6 @@ setup() { # Check that Elasticsearch is working ################################## @test "[DEB] test elasticsearch" { - skip_not_dpkg - start_elasticsearch_service run_elasticsearch_tests @@ -96,21 +83,16 @@ setup() { # Uninstall DEB package ################################## @test "[DEB] remove package" { - skip_not_dpkg - run dpkg -r 'elasticsearch' >&2 - [ "$status" -eq 0 ] + dpkg -r 'elasticsearch' } @test "[DEB] package has been removed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 + run dpkg -s 'elasticsearch' [ "$status" -eq 0 ] echo "$output" | grep -i "status" | grep -i "deinstall ok" } @test "[DEB] verify package removal" { - skip_not_dpkg - # The removal must stop the service count=$(ps | grep Elasticsearch | wc -l) [ "$count" -eq 0 ] @@ -146,14 +128,10 @@ setup() { } @test "[DEB] purge package" { - skip_not_dpkg - run dpkg --purge 'elasticsearch' >&2 - [ "$status" -eq 0 ] + dpkg --purge 'elasticsearch' } @test "[DEB] verify package purge" { - skip_not_dpkg - # all remaining files are deleted by the purge assert_file_not_exist "/etc/elasticsearch" assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml" @@ -171,7 +149,6 @@ setup() { } @test "[DEB] package has been completly removed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 + run dpkg -s 'elasticsearch' [ "$status" -eq 1 ] } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats index 6d8aff66410..cbcdd794c76 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -33,50 +33,39 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + 
skip_not_rpm } ################################## # Install RPM package ################################## @test "[RPM] rpm command is available" { - skip_not_rpm - run rpm --version - [ "$status" -eq 0 ] + clean_before_test + rpm --version } @test "[RPM] package is available" { - skip_not_rpm count=$(find . -type f -name 'elastic*.rpm' | wc -l) [ "$count" -eq 1 ] } @test "[RPM] package is not installed" { - skip_not_rpm - run rpm -qe 'elasticsearch' >&2 + run rpm -qe 'elasticsearch' [ "$status" -eq 1 ] } @test "[RPM] install package" { - skip_not_rpm - run rpm -i elasticsearch*.rpm >&2 - [ "$status" -eq 0 ] + rpm -i elasticsearch*.rpm } @test "[RPM] package is installed" { - skip_not_rpm - run rpm -qe 'elasticsearch' >&2 - [ "$status" -eq 0 ] + rpm -qe 'elasticsearch' } ################################## # Check that the package is correctly installed ################################## @test "[RPM] verify package installation" { - skip_not_rpm - verify_package_installation } @@ -84,8 +73,6 @@ setup() { # Check that Elasticsearch is working ################################## @test "[RPM] test elasticsearch" { - skip_not_rpm - start_elasticsearch_service run_elasticsearch_tests @@ -95,20 +82,15 @@ setup() { # Uninstall RPM package ################################## @test "[RPM] remove package" { - skip_not_rpm - run rpm -e 'elasticsearch' >&2 - [ "$status" -eq 0 ] + rpm -e 'elasticsearch' } @test "[RPM] package has been removed" { - skip_not_rpm - run rpm -qe 'elasticsearch' >&2 + run rpm -qe 'elasticsearch' [ "$status" -eq 1 ] } @test "[RPM] verify package removal" { - skip_not_rpm - # The removal must stop the service count=$(ps | grep Elasticsearch | wc -l) [ "$count" -eq 0 ] diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats deleted file mode 100644 index 77ce7f0cecb..00000000000 --- a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the installation and removal -# of plugins when Elasticsearch is installed as a DEB/RPM -# package. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It removes the 'elasticsearch' -# user/group and also many directories. Do not execute this file -# unless you know exactly what you are doing. - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# Load test utilities -load packaging_test_utils - -setup() { - # Cleans everything for every test execution - clean_before_test -} - -# Install a deb or rpm package -install_package() { - if is_rpm; then - run rpm -i elasticsearch*.rpm >&2 - [ "$status" -eq 0 ] - - elif is_dpkg; then - run dpkg -i elasticsearch*.deb >&2 - [ "$status" -eq 0 ] - fi -} - -################################## -# Install plugins with DEB/RPM package -################################## -@test "[PLUGINS] install jvm-example plugin" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_not_exist "/usr/share/elasticsearch/plugins/jvm-example" -} - -@test "[PLUGINS] install jvm-example plugin with a custom path.plugins" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Creates a temporary directory - TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Modify the path.plugins setting in configuration file - echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/etc/elasticsearch/elasticsearch.yml" - - # Sets privileges - run chown -R root:elasticsearch "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] - - run chmod -R 750 "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/plugin-descriptor.properties" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/jvm-example-"*".jar" - - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_not_exist 
"$TEMP_PLUGINS_DIR/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] -} - -@test "[PLUGINS] install jvm-example plugin with a custom CONFIG_DIR" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Modify the CONF_DIR variable in environment file - if is_rpm; then - echo "CONF_DIR=$TEMP_CONFIG_DIR" >> "/etc/sysconfig/elasticsearch" - elif is_dpkg; then - echo "CONF_DIR=$TEMP_CONFIG_DIR" >> "/etc/default/elasticsearch" - fi - - # Move configuration files to the new configuration directory - run mv /etc/elasticsearch/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Sets privileges - run chown -R root:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - run chmod -R 750 "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-exampel - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/usr/share/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} - -@test "[PLUGINS] install jvm-example plugin with a custom ES_JAVA_OPTS" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Move configuration files to the new configuration directory - run mv /etc/elasticsearch/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Sets privileges - run chown -R root:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - run chmod -R 750 "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Export ES_JAVA_OPTS - export ES_JAVA_OPTS="-Des.path.conf=$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist 
"$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/usr/share/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats new file mode 120000 index 00000000000..8f55b1eb78c --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats @@ -0,0 +1 @@ +plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats index 011b063a161..8df4f4a980b 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats @@ -33,42 +33,27 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi - - - # Installs a package before test - if is_dpkg; then - dpkg -i elasticsearch*.deb >&2 || true - fi - if is_rpm; then - rpm -i elasticsearch*.rpm >&2 || true - fi + skip_not_systemd + skip_not_dpkg_or_rpm } -@test "[SYSTEMD] daemon reload" { - skip_not_systemd +@test "[SYSTEMD] install elasticsearch" { + clean_before_test + install_package +} - run systemctl daemon-reload - [ "$status" -eq 0 ] +@test "[SYSTEMD] daemon reload after install" { + systemctl daemon-reload } @test "[SYSTEMD] enable" { - skip_not_systemd + systemctl enable elasticsearch.service - run systemctl enable elasticsearch.service - [ "$status" -eq 0 ] - - run systemctl is-enabled elasticsearch.service - [ "$status" -eq 0 ] + systemctl is-enabled elasticsearch.service } @test "[SYSTEMD] start" { - skip_not_systemd - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] + systemctl start elasticsearch.service wait_for_elasticsearch_status @@ -76,72 +61,53 @@ setup() { } @test "[SYSTEMD] start (running)" { - skip_not_systemd - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] + systemctl start elasticsearch.service } @test "[SYSTEMD] is active (running)" { - skip_not_systemd - run systemctl is-active elasticsearch.service [ "$status" -eq 0 ] [ "$output" = "active" ] } @test "[SYSTEMD] status (running)" { - skip_not_systemd - - run systemctl status elasticsearch.service - [ "$status" -eq 0 ] + systemctl status elasticsearch.service } ################################## # Check that Elasticsearch is working ################################## @test "[SYSTEMD] test elasticsearch" { - skip_not_systemd - run_elasticsearch_tests } @test "[SYSTEMD] restart" { - skip_not_systemd - - run systemctl restart elasticsearch.service - [ "$status" -eq 0 ] + systemctl restart elasticsearch.service wait_for_elasticsearch_status - run service elasticsearch status - [ "$status" -eq 0 ] + service elasticsearch 
status } @test "[SYSTEMD] stop (running)" { - skip_not_systemd - - run systemctl stop elasticsearch.service - [ "$status" -eq 0 ] + systemctl stop elasticsearch.service run systemctl status elasticsearch.service + [ "$status" -eq 3 ] || { echo "Expected exit code 3 meaning stopped"; false; } echo "$output" | grep "Active:" | grep "inactive" } @test "[SYSTEMD] stop (stopped)" { - skip_not_systemd - - run systemctl stop elasticsearch.service - [ "$status" -eq 0 ] + systemctl stop elasticsearch.service run systemctl status elasticsearch.service + [ "$status" -eq 3 ] || { echo "Expected exit code 3 meaning stopped"; false; } echo "$output" | grep "Active:" | grep "inactive" } @test "[SYSTEMD] status (stopped)" { - skip_not_systemd - run systemctl status elasticsearch.service + [ "$status" -eq 3 ] || { echo "Expected exit code 3 meaning stopped"; false; } echo "$output" | grep "Active:" | grep "inactive" } @@ -150,21 +116,15 @@ setup() { # but it should not block ES from starting # see https://github.com/elastic/elasticsearch/issues/11594 @test "[SYSTEMD] delete PID_DIR and restart" { - skip_not_systemd + rm -rf /var/run/elasticsearch - run rm -rf /var/run/elasticsearch - [ "$status" -eq 0 ] + systemd-tmpfiles --create - run systemd-tmpfiles --create - [ "$status" -eq 0 ] - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] + systemctl start elasticsearch.service wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - run systemctl stop elasticsearch.service - [ "$status" -eq 0 ] -} \ No newline at end of file + systemctl stop elasticsearch.service +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats index 97d1cce918f..5bf43163ab1 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats @@ -33,24 +33,17 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + skip_not_sysvinit + skip_not_dpkg_or_rpm +} - # Installs a package before test - if is_dpkg; then - dpkg -i elasticsearch*.deb >&2 || true - fi - if is_rpm; then - rpm -i elasticsearch*.rpm >&2 || true - fi +@test "[INIT.D] install elasticsearch" { + clean_before_test + install_package } @test "[INIT.D] start" { - skip_not_sysvinit - - run service elasticsearch start - [ "$status" -eq 0 ] + service elasticsearch start wait_for_elasticsearch_status @@ -58,44 +51,29 @@ setup() { } @test "[INIT.D] status (running)" { - skip_not_sysvinit - - run service elasticsearch status - [ "$status" -eq 0 ] + service elasticsearch status } ################################## # Check that Elasticsearch is working ################################## @test "[INIT.D] test elasticsearch" { - skip_not_sysvinit - run_elasticsearch_tests } @test "[INIT.D] restart" { - skip_not_sysvinit - - run service elasticsearch restart - [ "$status" -eq 0 ] + service elasticsearch restart wait_for_elasticsearch_status - run service elasticsearch status - [ "$status" -eq 0 ] + service elasticsearch status } @test "[INIT.D] stop (running)" { - skip_not_sysvinit - - run service elasticsearch stop - [ "$status" -eq 0 ] - + service elasticsearch stop } @test "[INIT.D] status (stopped)" { - skip_not_sysvinit - run service elasticsearch status # precise returns 4, trusty 3 [ "$status" -eq 3 ] || [ "$status" -eq 4 ] @@ -106,19 +84,13 @@ setup() { # but it should not block ES from starting # see 
https://github.com/elastic/elasticsearch/issues/11594 @test "[INIT.D] delete PID_DIR and restart" { - skip_not_sysvinit + rm -rf /var/run/elasticsearch - run rm -rf /var/run/elasticsearch - [ "$status" -eq 0 ] - - - run service elasticsearch start - [ "$status" -eq 0 ] + service elasticsearch start wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - run service elasticsearch stop - [ "$status" -eq 0 ] + service elasticsearch stop } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index cbb68389cc0..9eff5894c17 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -25,10 +25,6 @@ # specific language governing permissions and limitations # under the License. - -# Variables used by tests -EXAMPLE_PLUGIN_ZIP=$(readlink -m jvm-example-*.zip) - # Checks if necessary commands are available to run the tests if [ ! -x /usr/bin/which ]; then @@ -83,16 +79,16 @@ is_rpm() { # Skip test if the 'dpkg' command is not supported skip_not_dpkg() { - if [ ! -x "`which dpkg 2>/dev/null`" ]; then - skip "dpkg is not supported" - fi + is_dpkg || skip "dpkg is not supported" } # Skip test if the 'rpm' command is not supported skip_not_rpm() { - if [ ! -x "`which rpm 2>/dev/null`" ]; then - skip "rpm is not supported" - fi + is_rpm || skip "rpm is not supported" +} + +skip_not_dpkg_or_rpm() { + is_dpkg || is_rpm || skip "only dpkg or rpm systems are supported" } # Returns 0 if the system supports Systemd @@ -151,16 +147,18 @@ assert_file_not_exist() { } assert_file() { - local file=$1 + local file="$1" local type=$2 local user=$3 local privileges=$4 - [ -n "$file" ] && [ -e "$file" ] + assert_file_exist "$file" if [ "$type" = "d" ]; then + echo "And be a directory...." [ -d "$file" ] else + echo "And be a regular file...." [ -f "$file" ] fi @@ -234,59 +232,32 @@ verify_package_installation() { fi } - -# Install the tar.gz archive -install_archive() { - local eshome="/tmp" - if [ "x$1" != "x" ]; then - eshome="$1" +# Install the rpm or deb package +install_package() { + if is_rpm; then + rpm -i elasticsearch*.rpm + elif is_dpkg; then + dpkg -i elasticsearch*.deb + else + skip "Only rpm or deb supported" fi - - tar -xzvf elasticsearch*.tar.gz -C "$eshome" - - find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \; - - # ES cannot run as root so create elasticsearch user & group if needed - if ! getent group "elasticsearch" > /dev/null 2>&1 ; then - if is_dpkg; then - addgroup --system "elasticsearch" - else - groupadd -r "elasticsearch" - fi - fi - if ! 
id "elasticsearch" > /dev/null 2>&1 ; then - if is_dpkg; then - adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch" - else - useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch" - fi - fi - - chown -R elasticsearch:elasticsearch "$eshome/elasticsearch" } - # Checks that all directories & files are correctly installed # after a archive (tar.gz/zip) install verify_archive_installation() { - local eshome="/tmp/elasticsearch" - if [ "x$1" != "x" ]; then - eshome="$1" - fi - - assert_file "$eshome" d - assert_file "$eshome/bin" d - assert_file "$eshome/bin/elasticsearch" f - assert_file "$eshome/bin/elasticsearch.in.sh" f - assert_file "$eshome/bin/plugin" f - assert_file "$eshome/config" d - assert_file "$eshome/config/elasticsearch.yml" f - assert_file "$eshome/config/logging.yml" f - assert_file "$eshome/config" d - assert_file "$eshome/lib" d - assert_file "$eshome/NOTICE.txt" f - assert_file "$eshome/LICENSE.txt" f - assert_file "$eshome/README.textile" f + assert_file "$ESHOME" d + assert_file "$ESHOME/bin" d + assert_file "$ESHOME/bin/elasticsearch" f + assert_file "$ESHOME/bin/elasticsearch.in.sh" f + assert_file "$ESHOME/bin/plugin" f + assert_file "$ESCONFIG" d + assert_file "$ESCONFIG/elasticsearch.yml" f + assert_file "$ESCONFIG/logging.yml" f + assert_file "$ESHOME/lib" d + assert_file "$ESHOME/NOTICE.txt" f + assert_file "$ESHOME/LICENSE.txt" f + assert_file "$ESHOME/README.textile" f } # Deletes everything before running a test file @@ -478,3 +449,15 @@ run_elasticsearch_tests() { curl -s -XDELETE 'http://localhost:9200/_all' } + +# Move the config directory to another directory and properly chown it. +move_config() { + local oldConfig="$ESCONFIG" + export ESCONFIG="${1:-$(mktemp -d -t 'config.XXXX')}" + echo "Moving configuration directory from $oldConfig to $ESCONFIG" + + # Move configuration files to the new configuration directory + mv "$oldConfig"/* "$ESCONFIG" + chown -R elasticsearch:elasticsearch "$ESCONFIG" + assert_file_exist "$ESCONFIG/elasticsearch.yml" +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash new file mode 100644 index 00000000000..ebd23eeba2d --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash @@ -0,0 +1,185 @@ +#!/usr/bin/env bats + +# This file is used to test the installation and removal +# of plugins after Elasticsearch has been installed with tar.gz, +# rpm, and deb. + +# WARNING: This testing file must be executed as root and can +# dramatically change your system. It removes the 'elasticsearch' +# user/group and also many directories. Do not execute this file +# unless you know exactly what you are doing. + +# The test case can be executed with the Bash Automated +# Testing System tool available at https://github.com/sstephenson/bats +# Thanks to Sam Stephenson! + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
new file mode 100644
index 00000000000..ebd23eeba2d
--- /dev/null
+++ b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
@@ -0,0 +1,185 @@
+#!/usr/bin/env bats
+
+# This file is used to test the installation and removal
+# of plugins after Elasticsearch has been installed with tar.gz,
+# rpm, and deb.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test cases can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+##################################
+# Common test cases for both tar and rpm/deb based plugin tests
+##################################
+# This file is symlinked to both 25_tar_plugins.bats and 50_plugins.bats so it's
+# executed twice - once to test plugins using the tar distribution and once to
+# test plugins using the rpm distribution or the deb distribution, whichever
+# the system uses.
+
+# Load test utilities
+load packaging_test_utils
+load plugins
+
+setup() {
+    # The rules on when we should clean and reinstall are complex - all the
+    # jvm-example tests need cleaning because they are rough on the filesystem.
+    # The first test and any tests that find themselves without an ESHOME need
+    # to clean as well.... this is going to mostly only happen on the first
+    # non-jvm-example-plugin-test _and_ any first test if you comment out the
+    # other tests. Commenting out lots of test cases seems like a reasonably
+    # common workflow.
+    if [ $BATS_TEST_NUMBER == 1 ] ||
+            [[ $BATS_TEST_NAME =~ install_jvm.*example ]] ||
+            [ ! -d "$ESHOME" ]; then
+        echo "cleaning" >> /tmp/ss
+        clean_before_test
+        install
+    fi
+}
+
+if [[ "$BATS_TEST_FILENAME" =~ 25_tar_plugins.bats$ ]]; then
+    load tar
+    GROUP='TAR PLUGINS'
+    install() {
+        install_archive
+        verify_archive_installation
+    }
+    export ESHOME=/tmp/elasticsearch
+    export_elasticsearch_paths
+else
+    if is_rpm; then
+        GROUP='RPM PLUGINS'
+    elif is_dpkg; then
+        GROUP='DEB PLUGINS'
+    fi
+    export ESHOME="/usr/share/elasticsearch"
+    export ESPLUGINS="$ESHOME/plugins"
+    export ESCONFIG="/etc/elasticsearch"
+    install() {
+        install_package
+        verify_package_installation
+    }
+fi
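To make the dispatch on `$BATS_TEST_FILENAME` concrete: the two bats entry points named in the comment above are expected to be symlinks back to this file, so the same test cases run once per distribution. A rough sketch of the wiring (not part of this patch; the build normally creates the symlinks):

---------------------------------------------
# Both entry points resolve to plugin_test_cases.bash;
# $BATS_TEST_FILENAME selects the tar or package setup.
ln -s plugin_test_cases.bash 25_tar_plugins.bats
ln -s plugin_test_cases.bash 50_plugins.bats

sudo bats 25_tar_plugins.bats   # TAR PLUGINS group
sudo bats 50_plugins.bats       # RPM or DEB PLUGINS group
---------------------------------------------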
install kuromoji plugin" { + install_and_remove_special_plugin analysis kuromoji +} + +@test "[$GROUP] install phonetic plugin" { + install_and_remove_special_plugin analysis phonetic commons-codec-*.jar +} + +@test "[$GROUP] install smartcn plugin" { + install_and_remove_special_plugin analysis smartcn +} + +@test "[$GROUP] install stempel plugin" { + install_and_remove_special_plugin analysis stempel +} + +@test "[$GROUP] install aws plugin" { + install_and_remove_special_plugin cloud aws aws-java-sdk-core-*.jar +} + +@test "[$GROUP] install azure plugin" { + install_and_remove_special_plugin cloud azure azure-core-*.jar +} + +@test "[$GROUP] install gce plugin" { + install_and_remove_special_plugin cloud gce google-api-client-*.jar +} + +@test "[$GROUP] install delete by query" { + install_and_remove_special_plugin - delete-by-query +} + +@test "[$GROUP] install javascript plugin" { + install_and_remove_special_plugin lang javascript rhino-*.jar +} + +@test "[$GROUP] install python plugin" { + install_and_remove_special_plugin lang python jython-standalone-*.jar +} + +@test "[$GROUP] install murmur3 mapper" { + install_and_remove_special_plugin mapper murmur3 +} + +@test "[$GROUP] install size mapper" { + install_and_remove_special_plugin mapper size +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash new file mode 100644 index 00000000000..2ca6e7501ab --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash @@ -0,0 +1,104 @@ +#!/bin/sh + +# This file contains some utilities to test the elasticsearch scripts, +# the .deb/.rpm packages and the SysV/Systemd scripts. + +# WARNING: This testing file must be executed as root and can +# dramatically change your system. It removes the 'elasticsearch' +# user/group and also many directories. Do not execute this file +# unless you know exactly what you are doing. + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Install a plugin an run all the common post installation tests. +install_plugin() { + local name=$1 + local path="$2" + + assert_file_exist "$path" + + "$ESHOME/bin/plugin" install "file://$path" + + assert_file_exist "$ESPLUGINS/$name" + assert_file_exist "$ESPLUGINS/$name/plugin-descriptor.properties" + assert_file_exist "$ESPLUGINS/$name/$name"*".jar" +} + +# Remove a plugin and make sure its plugin directory is removed. +remove_plugin() { + local name=$1 + + echo "Removing $name...." + "$ESHOME/bin/plugin" remove $name + + assert_file_not_exist "$ESPLUGINS/$name" +} + +# Install the jvm-example plugin which fully excercises the special case file +# placements for non-site plugins. 
+
+# Install the jvm-example plugin which fully exercises the special case file
+# placements for non-site plugins.
+install_jvm_example() {
+    local relativePath=${1:-$(readlink -m jvm-example-*.zip)}
+    install_plugin jvm-example "$relativePath"
+
+    assert_file_exist "$ESHOME/bin/jvm-example"
+    assert_file_exist "$ESHOME/bin/jvm-example/test"
+    assert_file_exist "$ESCONFIG/jvm-example"
+    assert_file_exist "$ESCONFIG/jvm-example/example.yaml"
+
+    echo "Running jvm-example's bin script...."
+    "$ESHOME/bin/jvm-example/test" | grep test
+}
+
+# Remove the jvm-example plugin which fully exercises the special cases of
+# removing bin and not removing config.
+remove_jvm_example() {
+    remove_plugin jvm-example
+
+    assert_file_not_exist "$ESHOME/bin/jvm-example"
+    assert_file_exist "$ESCONFIG/jvm-example"
+    assert_file_exist "$ESCONFIG/jvm-example/example.yaml"
+}
+
+# Install and remove a plugin with a special prefix. For the most part prefixes
+# are just useful for grouping but the "analysis" prefix is special because all
+# analysis plugins come with a corresponding lucene-analyzers jar.
+# $1 - the prefix
+# $2 - the plugin name
+# $@ - all remaining arguments are jars that must exist in the plugin's
+#      installation directory
+install_and_remove_special_plugin() {
+    local prefix=$1
+    shift
+    local name=$1
+    shift
+
+    if [ "$prefix" == "-" ]; then
+        local fullName="$name"
+    else
+        local fullName="$prefix-$name"
+    fi
+
+    install_plugin $fullName "$(readlink -m $fullName-*.zip)"
+    if [ $prefix == 'analysis' ]; then
+        assert_file_exist "$(readlink -m $ESPLUGINS/$fullName/lucene-analyzers-$name-*.jar)"
+    fi
+    for file in "$@"; do
+        assert_file_exist "$(readlink -m $ESPLUGINS/$fullName/$file)"
+    done
+    remove_plugin $fullName
+}
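For reference, these are the kinds of calls the shared test cases above make, covering both the prefixed and unprefixed forms of the helper:

---------------------------------------------
# "analysis" prefix: installs analysis-icu and additionally checks the
# bundled lucene-analyzers-icu jar plus the listed extra jar.
install_and_remove_special_plugin analysis icu icu4j-*.jar

# "-" means no prefix: the plugin name is just delete-by-query.
install_and_remove_special_plugin - delete-by-query
---------------------------------------------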
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash
new file mode 100644
index 00000000000..7725edf4b94
--- /dev/null
+++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+# This file contains some utilities to test the elasticsearch scripts,
+# the .deb/.rpm packages and the SysV/Systemd scripts.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# Install the tar.gz archive
+install_archive() {
+    export ESHOME=${1:-/tmp/elasticsearch}
+
+    echo "Unpacking tarball to $ESHOME"
+    rm -rf /tmp/untar
+    mkdir -p /tmp/untar
+    tar -xzf elasticsearch*.tar.gz -C /tmp/untar
+
+    find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null
+
+    # ES cannot run as root so create elasticsearch user & group if needed
+    if ! getent group "elasticsearch" > /dev/null 2>&1 ; then
+        if is_dpkg; then
+            addgroup --system "elasticsearch"
+        else
+            groupadd -r "elasticsearch"
+        fi
+    fi
+    if ! id "elasticsearch" > /dev/null 2>&1 ; then
+        if is_dpkg; then
+            adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch"
+        else
+            useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch"
+        fi
+    fi
+
+    chown -R elasticsearch:elasticsearch "$ESHOME"
+    export_elasticsearch_paths
+}
+
+# Move the unzipped tarball to another location.
+move_elasticsearch() {
+    local oldhome="$ESHOME"
+    export ESHOME="$1"
+    rm -rf "$ESHOME"
+    mv "$oldhome" "$ESHOME"
+    export_elasticsearch_paths
+}
+
+# Export some useful paths.
+export_elasticsearch_paths() {
+    export ESPLUGINS="$ESHOME/plugins"
+    export ESCONFIG="$ESHOME/config"
+}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml
index 8b736b860bd..49e34fb16cd 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml
@@ -44,4 +44,10 @@
   - match: { responses.1.error.root_cause.0.index: test_2 }
   - match: { responses.2.hits.total: 1 }
 
-
+  - do:
+      msearch:
+        body:
+          - index: test_1
+          - query:
+              { "template": { "query": { "term": { "foo": { "value": "{{template}}" } } }, "params": { "template": "bar" } } }
+  - match: { responses.0.hits.total: 1 }
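Outside the REST test harness, the new msearch case corresponds to a request along these lines against a local node (a hedged sketch; `test_1` and the `foo` field come from the test fixture set up earlier in that file, and `_msearch` expects newline-delimited header/body pairs):

---------------------------------------------
printf '%s\n' \
  '{"index": "test_1"}' \
  '{"query": {"template": {"query": {"term": {"foo": {"value": "{{template}}"}}}, "params": {"template": "bar"}}}}' \
  | curl -s -XGET 'http://localhost:9200/_msearch' --data-binary @-
---------------------------------------------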