Merge branch 'master' into feature/query-refactoring
Conflicts:
	core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java
	core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java
	core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java
	core/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java
	core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java
	core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java
	core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
	core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
	core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
	core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java
	core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
	core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
commit 52be313c69
TESTING.asciidoc

@@ -338,8 +338,8 @@ time to setup all the VMs one at a time. Run this to download and setup the VMs
 we use for testing by default:
 
 --------------------------------------------------------
-vagrant up --provision trusty && vagrant halt trusty
-vagrant up --provision centos-7 && vagrant halt centos-7
+vagrant up --provision trusty --provider virtualbox && vagrant halt trusty
+vagrant up --provision centos-7 --provider virtualbox && vagrant halt centos-7
 --------------------------------------------------------
 
 or run this to download and setup all the VMs:
@@ -347,7 +347,7 @@ or run this to download and setup all the VMs:
 -------------------------------------------------------------------------------
 vagrant halt
 for box in $(vagrant status | grep 'poweroff\|not created' | cut -f1 -d' '); do
-  vagrant up --provision $box
+  vagrant up --provision $box --provider virtualbox
   vagrant halt $box
 done
 -------------------------------------------------------------------------------
@@ -420,13 +420,13 @@ This is just regular vagrant so you can run normal multi box vagrant commands
 to test things manually. Just run:
 
 ---------------------------------------
-vagrant up trusty && vagrant ssh trusty
+vagrant up trusty --provider virtualbox && vagrant ssh trusty
 ---------------------------------------
 
 to get an Ubuntu or
 
 -------------------------------------------
-vagrant up centos-7 && vagrant ssh centos-7
+vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
 -------------------------------------------
 
 to get a CentOS. Once you are done with them you should halt them:
@@ -469,7 +469,7 @@ vagrant ssh precise -c 'sudo rm -rf /bin'; echo oops
 All you've got to do to get another one is
 
 ----------------------------------------------
-vagrant destroy -f trusty && vagrant up trusty
+vagrant destroy -f trusty && vagrant up trusty --provider virtualbox
 ----------------------------------------------
 
 The whole process takes a minute and a half on a modern laptop, two and a half
@@ -508,7 +508,7 @@ mvn -pl distribution/rpm package
 and in another window:
 
 ----------------------------------------------------
-vagrant up centos-7 && vagrant ssh centos-7
+vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
 cd $RPM
 sudo bats $BATS/*rpm*.bats
 ----------------------------------------------------
@@ -520,20 +520,34 @@ If you wanted to retest all the release artifacts on a single VM you could:
 mvn -amd -pl distribution install -DskipTests
 # Copy them all the testroot
 mvn -Dtests.vagrant -pl qa/vagrant pre-integration-test
-vagrant up trusty && vagrant ssh trusty
+vagrant up trusty --provider virtualbox && vagrant ssh trusty
 cd $TESTROOT
 sudo bats $BATS/*.bats
 -------------------------------------------------
 
 == Coverage analysis
 
-To run tests instrumented with jacoco and produce a coverage report in
-`target/site/jacoco/`:
+Tests can be run instrumented with jacoco to produce a coverage report in
+`target/site/jacoco/`.
 
+Unit test coverage:
+
 ---------------------------------------------------------------------------
 mvn -Dtests.coverage test jacoco:report
 ---------------------------------------------------------------------------
+
+Integration test coverage:
+
+---------------------------------------------------------------------------
+mvn -Dtests.coverage -Dskip.unit.tests verify jacoco:report
+---------------------------------------------------------------------------
+
+Combined (Unit+Integration) coverage:
+
+---------------------------------------------------------------------------
+mvn -Dtests.coverage verify jacoco:report
+---------------------------------------------------------------------------
 
 == Debugging from an IDE
 
 If you want to run elasticsearch from your IDE, you should execute ./run.sh

core/pom.xml

@@ -105,8 +105,6 @@
         </dependency>
-        <!-- Lucene spatial -->
-
 
         <!-- START: dependencies that might be shaded -->
         <dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
@@ -165,7 +163,6 @@
             <groupId>commons-cli</groupId>
             <artifactId>commons-cli</artifactId>
         </dependency>
-        <!-- END: dependencies that might be shaded -->
 
         <dependency>
             <groupId>org.codehaus.groovy</groupId>

core/src/main/java/org/apache/lucene/queries/MinDocQuery.java

@@ -27,7 +27,6 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
 
@@ -60,7 +59,7 @@ public final class MinDocQuery extends Query {
     public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
         return new ConstantScoreWeight(this) {
             @Override
-            public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException {
+            public Scorer scorer(LeafReaderContext context) throws IOException {
                 final int maxDoc = context.reader().maxDoc();
                 if (context.docBase + maxDoc <= minDoc) {
                     return null;
@@ -89,12 +88,6 @@ public final class MinDocQuery extends Query {
                         } else {
                             doc = target;
                         }
-                        while (doc < maxDoc) {
-                            if (acceptDocs == null || acceptDocs.get(doc)) {
-                                break;
-                            }
-                            doc += 1;
-                        }
                         if (doc >= maxDoc) {
                             doc = NO_MORE_DOCS;
                         }

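The deleted loop was the per-document liveness check: in Lucene 5.3, Weight#scorer no longer receives a Bits acceptDocs parameter (deleted docs are filtered outside the query), so only the clamp-to-minDoc logic survives. A simplified sketch of the remaining advance behavior, using a hypothetical helper and ignoring the per-segment docBase arithmetic visible above:

----------------------------------------------------------------------
import org.apache.lucene.search.DocIdSetIterator;

final class MinDocAdvanceSketch {
    // Clamp the target up to minDoc; past maxDoc the iterator is exhausted.
    static int advance(int target, int minDoc, int maxDoc) {
        int doc = Math.max(target, minDoc);
        return doc >= maxDoc ? DocIdSetIterator.NO_MORE_DOCS : doc;
    }

    public static void main(String[] args) {
        System.out.println(advance(3, 10, 100));   // 10: clamped up to minDoc
        System.out.println(advance(150, 10, 100)); // 2147483647: NO_MORE_DOCS
    }
}
----------------------------------------------------------------------
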
core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java

@@ -279,7 +279,7 @@ public class MapperQueryParser extends QueryParser {
                     if (q != null) {
                         added = true;
                         applyBoost(mField, q);
-                        applySlop(q, slop);
+                        q = applySlop(q, slop);
                         disMaxQuery.add(q);
                     }
                 }
@@ -293,7 +293,7 @@ public class MapperQueryParser extends QueryParser {
                     Query q = super.getFieldQuery(mField, queryText, slop);
                     if (q != null) {
                         applyBoost(mField, q);
-                        applySlop(q, slop);
+                        q = applySlop(q, slop);
                         clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
                     }
                 }
@@ -718,15 +718,6 @@ public class MapperQueryParser extends QueryParser {
         return super.getWildcardQuery(field, aggStr.toString());
     }
 
-    @Override
-    protected WildcardQuery newWildcardQuery(Term t) {
-        // Backport: https://issues.apache.org/jira/browse/LUCENE-6677
-        assert Version.LATEST == Version.LUCENE_5_2_1;
-        WildcardQuery query = new WildcardQuery(t, maxDeterminizedStates);
-        query.setRewriteMethod(multiTermRewriteMethod);
-        return query;
-    }
-
     @Override
     protected Query getRegexpQuery(String field, String termStr) throws ParseException {
         if (lowercaseExpandedTerms) {
@@ -815,14 +806,24 @@ public class MapperQueryParser extends QueryParser {
         }
     }
 
-    private void applySlop(Query q, int slop) {
-        if (q instanceof FilteredQuery) {
-            applySlop(((FilteredQuery)q).getQuery(), slop);
-        }
+    private Query applySlop(Query q, int slop) {
         if (q instanceof PhraseQuery) {
-            ((PhraseQuery) q).setSlop(slop);
+            PhraseQuery pq = (PhraseQuery) q;
+            PhraseQuery.Builder builder = new PhraseQuery.Builder();
+            builder.setSlop(slop);
+            final Term[] terms = pq.getTerms();
+            final int[] positions = pq.getPositions();
+            for (int i = 0; i < terms.length; ++i) {
+                builder.add(terms[i], positions[i]);
+            }
+            pq = builder.build();
+            pq.setBoost(q.getBoost());
+            return pq;
         } else if (q instanceof MultiPhraseQuery) {
             ((MultiPhraseQuery) q).setSlop(slop);
+            return q;
+        } else {
+            return q;
         }
     }
 

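This is the Lucene 5.3 immutability pattern: PhraseQuery no longer has a setSlop mutator, so changing the slop means rebuilding the query term by term, and both call sites above switch to consuming the returned query (q = applySlop(q, slop)). A standalone sketch of the same rebuild, assuming Lucene 5.3 on the classpath:

----------------------------------------------------------------------
import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;

final class SlopRebuildSketch {
    // Copy terms and positions into a builder, apply the new slop, and
    // build a fresh query; the original PhraseQuery stays untouched.
    static PhraseQuery withSlop(PhraseQuery original, int slop) {
        PhraseQuery.Builder builder = new PhraseQuery.Builder();
        builder.setSlop(slop);
        Term[] terms = original.getTerms();
        int[] positions = original.getPositions();
        for (int i = 0; i < terms.length; ++i) {
            builder.add(terms[i], positions[i]);
        }
        return builder.build();
    }
}
----------------------------------------------------------------------

MultiPhraseQuery is still mutable in 5.3, which is why the else-if branch above can keep calling setSlop directly.
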
core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java (deleted)

@@ -1,153 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.lucene.search.postingshighlight;
-
-import java.text.BreakIterator;
-import java.text.CharacterIterator;
-
-/**
- * A {@link BreakIterator} that breaks the text whenever a certain separator, provided as a constructor argument, is found.
- */
-public class CustomSeparatorBreakIterator extends BreakIterator {
-
-    private final char separator;
-    private CharacterIterator text;
-    private int current;
-
-    public CustomSeparatorBreakIterator(char separator) {
-        this.separator = separator;
-    }
-
-    @Override
-    public int current() {
-        return current;
-    }
-
-    @Override
-    public int first() {
-        text.setIndex(text.getBeginIndex());
-        return current = text.getIndex();
-    }
-
-    @Override
-    public int last() {
-        text.setIndex(text.getEndIndex());
-        return current = text.getIndex();
-    }
-
-    @Override
-    public int next() {
-        if (text.getIndex() == text.getEndIndex()) {
-            return DONE;
-        } else {
-            return advanceForward();
-        }
-    }
-
-    private int advanceForward() {
-        char c;
-        while( (c = text.next()) != CharacterIterator.DONE) {
-            if (c == separator) {
-                return current = text.getIndex() + 1;
-            }
-        }
-        assert text.getIndex() == text.getEndIndex();
-        return current = text.getIndex();
-    }
-
-    @Override
-    public int following(int pos) {
-        if (pos < text.getBeginIndex() || pos > text.getEndIndex()) {
-            throw new IllegalArgumentException("offset out of bounds");
-        } else if (pos == text.getEndIndex()) {
-            // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something)
-            // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909
-            text.setIndex(text.getEndIndex());
-            current = text.getIndex();
-            return DONE;
-        } else {
-            text.setIndex(pos);
-            current = text.getIndex();
-            return advanceForward();
-        }
-    }
-
-    @Override
-    public int previous() {
-        if (text.getIndex() == text.getBeginIndex()) {
-            return DONE;
-        } else {
-            return advanceBackward();
-        }
-    }
-
-    private int advanceBackward() {
-        char c;
-        while( (c = text.previous()) != CharacterIterator.DONE) {
-            if (c == separator) {
-                return current = text.getIndex() + 1;
-            }
-        }
-        assert text.getIndex() == text.getBeginIndex();
-        return current = text.getIndex();
-    }
-
-    @Override
-    public int preceding(int pos) {
-        if (pos < text.getBeginIndex() || pos > text.getEndIndex()) {
-            throw new IllegalArgumentException("offset out of bounds");
-        } else if (pos == text.getBeginIndex()) {
-            // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something)
-            // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909
-            text.setIndex(text.getBeginIndex());
-            current = text.getIndex();
-            return DONE;
-        } else {
-            text.setIndex(pos);
-            current = text.getIndex();
-            return advanceBackward();
-        }
-    }
-
-    @Override
-    public int next(int n) {
-        if (n < 0) {
-            for (int i = 0; i < -n; i++) {
-                previous();
-            }
-        } else {
-            for (int i = 0; i < n; i++) {
-                next();
-            }
-        }
-        return current();
-    }
-
-    @Override
-    public CharacterIterator getText() {
-        return text;
-    }
-
-    @Override
-    public void setText(CharacterIterator newText) {
-        text = newText;
-        current = text.getBeginIndex();
-    }
-}

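The fork can be deleted because CustomSeparatorBreakIterator was contributed upstream and ships with Lucene 5.3 under the same package, which this merge upgrades to. A throwaway usage sketch of its contract — boundaries fall immediately after each separator (assumes Lucene 5.3 on the classpath):

----------------------------------------------------------------------
import java.text.BreakIterator;

import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator;

final class CustomSeparatorDemo {
    public static void main(String[] args) {
        String text = "first snippet\nsecond snippet\nthird";
        BreakIterator breaker = new CustomSeparatorBreakIterator('\n');
        breaker.setText(text);
        int start = breaker.first();
        for (int end = breaker.next(); end != BreakIterator.DONE; start = end, end = breaker.next()) {
            // Each piece keeps its trailing separator.
            System.out.println("[" + text.substring(start, end) + "]");
        }
    }
}
----------------------------------------------------------------------
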
core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java

@@ -28,6 +28,7 @@ import org.apache.lucene.search.suggest.Lookup;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.*;
 import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.LimitedFiniteStringsIterator;
 import org.apache.lucene.util.automaton.Operations;
 import org.apache.lucene.util.automaton.Transition;
 import org.apache.lucene.util.fst.*;
@@ -465,16 +466,12 @@ public long ramBytesUsed() {
         byte buffer[] = new byte[8];
         try {
             ByteArrayDataOutput output = new ByteArrayDataOutput(buffer);
-            BytesRef surfaceForm;
 
-            while ((surfaceForm = iterator.next()) != null) {
-                Set<IntsRef> paths = toFiniteStrings(surfaceForm, ts2a);
-
-                maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, paths.size());
-
-                for (IntsRef path : paths) {
-
-                    Util.toBytesRef(path, scratch);
+            for (BytesRef surfaceForm; (surfaceForm = iterator.next()) != null;) {
+                LimitedFiniteStringsIterator finiteStrings =
+                        new LimitedFiniteStringsIterator(toAutomaton(surfaceForm, ts2a), maxGraphExpansions);
+                for (IntsRef string; (string = finiteStrings.next()) != null; count++) {
+                    Util.toBytesRef(string, scratch);
 
                     // length of the analyzed text (FST input)
                     if (scratch.length() > Short.MAX_VALUE-2) {
@@ -526,7 +523,7 @@ public long ramBytesUsed() {
 
                     writer.write(buffer, 0, output.getPosition());
                 }
-                count++;
+                maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, finiteStrings.size());
             }
             writer.close();
 
@@ -912,23 +909,17 @@ public long ramBytesUsed() {
         return prefixPaths;
     }
 
-    public final Set<IntsRef> toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
-        // Analyze surface form:
-        TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString());
-        return toFiniteStrings(ts2a, ts);
-    }
-
-    public final Set<IntsRef> toFiniteStrings(final TokenStreamToAutomaton ts2a, final TokenStream ts) throws IOException {
-        Automaton automaton = null;
-        try {
-            // Create corresponding automaton: labels are bytes
-            // from each analyzed token, with byte 0 used as
-            // separator between tokens:
-            automaton = ts2a.toAutomaton(ts);
-        } finally {
-            IOUtils.closeWhileHandlingException(ts);
+    final Automaton toAutomaton(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
+        try (TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString())) {
+            return toAutomaton(ts, ts2a);
         }
     }
 
+    final Automaton toAutomaton(TokenStream ts, final TokenStreamToAutomaton ts2a) throws IOException {
+        // Create corresponding automaton: labels are bytes
+        // from each analyzed token, with byte 0 used as
+        // separator between tokens:
+        Automaton automaton = ts2a.toAutomaton(ts);
+
         automaton = replaceSep(automaton);
         automaton = convertAutomaton(automaton);
@@ -940,11 +931,24 @@ public long ramBytesUsed() {
         // more than one path, eg if the analyzer created a
         // graph using SynFilter or WDF):
 
-        // TODO: we could walk & add simultaneously, so we
-        // don't have to alloc [possibly biggish]
-        // intermediate HashSet in RAM:
+        return automaton;
+    }
 
-        return Operations.getFiniteStrings(automaton, maxGraphExpansions);
+    // EDIT: Adrien, needed by lookup providers
+    // NOTE: these XForks are unmaintainable, we need to get rid of them...
+    public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+        final TokenStreamToAutomaton ts2a = getTokenStreamToAutomaton();
+        Automaton automaton;
+        try (TokenStream ts = stream) {
+            automaton = toAutomaton(ts, ts2a);
+        }
+        LimitedFiniteStringsIterator finiteStrings =
+                new LimitedFiniteStringsIterator(automaton, maxGraphExpansions);
+        Set<IntsRef> set = new HashSet<>();
+        for (IntsRef string = finiteStrings.next(); string != null; string = finiteStrings.next()) {
+            set.add(IntsRef.deepCopyOf(string));
+        }
+        return Collections.unmodifiableSet(set);
     }
 
     final Automaton toLookupAutomaton(final CharSequence key) throws IOException {

core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java

@@ -28,9 +28,10 @@ import org.apache.lucene.util.fst.FST;
 import org.apache.lucene.util.fst.PairOutputs;
 
 import java.io.IOException;
-import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
+import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
+
 /**
  * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is
@@ -221,42 +222,37 @@ public final class XFuzzySuggester extends XAnalyzingSuggester {
     }
 
     Automaton toLevenshteinAutomata(Automaton automaton) {
-        final Set<IntsRef> ref = Operations.getFiniteStrings(automaton, -1);
-        Automaton subs[] = new Automaton[ref.size()];
-        int upto = 0;
-        for (IntsRef path : ref) {
-            if (path.length <= nonFuzzyPrefix || path.length < minFuzzyLength) {
-                subs[upto] = Automata.makeString(path.ints, path.offset, path.length);
-                upto++;
+        List<Automaton> subs = new ArrayList<>();
+        FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton);
+        for (IntsRef string; (string = finiteStrings.next()) != null;) {
+            if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) {
+                subs.add(Automata.makeString(string.ints, string.offset, string.length));
             } else {
-                int ints[] = new int[path.length-nonFuzzyPrefix];
-                System.arraycopy(path.ints, path.offset+nonFuzzyPrefix, ints, 0, ints.length);
+                int ints[] = new int[string.length-nonFuzzyPrefix];
+                System.arraycopy(string.ints, string.offset+nonFuzzyPrefix, ints, 0, ints.length);
                 // TODO: maybe add alphaMin to LevenshteinAutomata,
                 // and pass 1 instead of 0? We probably don't want
                 // to allow the trailing dedup bytes to be
                 // edited... but then 0 byte is "in general" allowed
                 // on input (but not in UTF8).
                 LevenshteinAutomata lev = new LevenshteinAutomata(ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions);
-                subs[upto] = lev.toAutomaton(maxEdits, UnicodeUtil.newString(path.ints, path.offset, nonFuzzyPrefix));
-                upto++;
+                subs.add(lev.toAutomaton(maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix)));
             }
         }
 
-        if (subs.length == 0) {
+        if (subs.isEmpty()) {
             // automaton is empty, there is no accepted paths through it
             return Automata.makeEmpty(); // matches nothing
-        } else if (subs.length == 1) {
+        } else if (subs.size() == 1) {
             // no synonyms or anything: just a single path through the tokenstream
-            return subs[0];
+            return subs.get(0);
         } else {
             // multiple paths: this is really scary! is it slow?
             // maybe we should not do this and throw UOE?
-            Automaton a = Operations.union(Arrays.asList(subs));
+            Automaton a = Operations.union(subs);
             // TODO: we could call toLevenshteinAutomata() before det?
             // this only happens if you have multiple paths anyway (e.g. synonyms)
 
             // This automaton should not blow up during determinize:
-            return Operations.determinize(a, Integer.MAX_VALUE);
+            return Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES);
         }
     }
 }

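Both suggester forks move off Operations.getFiniteStrings, which materialized every accepted path into a Set up front, onto the Lucene 5.3 FiniteStringsIterator family, which streams one IntsRef at a time (LimitedFiniteStringsIterator adds the expansion cap). A minimal sketch of the new iteration style, assuming Lucene 5.3 on the classpath:

----------------------------------------------------------------------
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.FiniteStringsIterator;
import org.apache.lucene.util.automaton.Operations;

final class FiniteStringsSketch {
    public static void main(String[] args) {
        Automaton a = Operations.union(Automata.makeString("cat"),
                                       Automata.makeString("cats"));
        FiniteStringsIterator it = new FiniteStringsIterator(a);
        // The iterator reuses the returned IntsRef, which is why the
        // XAnalyzingSuggester code above stores IntsRef.deepCopyOf(string).
        for (IntsRef string; (string = it.next()) != null;) {
            System.out.println(UnicodeUtil.newString(string.ints, string.offset, string.length));
        }
    }
}
----------------------------------------------------------------------
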
core/src/main/java/org/elasticsearch/Version.java

@@ -258,7 +258,7 @@ public class Version {
     public static final int V_2_0_0_ID = 2000099;
     public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_1_0_ID = 2010099;
-    public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0);
 
     public static final Version CURRENT = V_2_1_0;
 

core/src/main/java/org/elasticsearch/action/ActionModule.java

@@ -180,6 +180,7 @@ import org.elasticsearch.action.suggest.TransportSuggestAction;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.AutoCreateIndex;
+import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.TransportAction;
 import org.elasticsearch.action.termvectors.MultiTermVectorsAction;
 import org.elasticsearch.action.termvectors.TermVectorsAction;
@@ -252,6 +253,7 @@ public class ActionModule extends AbstractModule {
         }
         bind(ActionFilters.class).asEagerSingleton();
         bind(AutoCreateIndex.class).asEagerSingleton();
+        bind(DestructiveOperations.class).asEagerSingleton();
         registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
         registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
         registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);

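DestructiveOperations becomes a container-managed singleton here instead of being constructed inside every transport action (see the constructor changes in TransportCloseIndexAction and TransportDeleteIndexAction below). A toy sketch of the binding pattern; it uses stock Google Guice for brevity, whereas Elasticsearch really uses its embedded fork under org.elasticsearch.common.inject, and DestructiveOps is a stand-in class:

----------------------------------------------------------------------
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;

final class EagerSingletonSketch {
    static class DestructiveOps {}  // stand-in for DestructiveOperations

    static class CloseAction {
        final DestructiveOps ops;

        @Inject
        CloseAction(DestructiveOps ops) {  // injected, not new'ed per action
            this.ops = ops;
        }
    }

    public static void main(String[] args) {
        CloseAction action = Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(DestructiveOps.class).asEagerSingleton();  // one shared instance
            }
        }).getInstance(CloseAction.class);
        System.out.println(action.ops != null);  // true
    }
}
----------------------------------------------------------------------
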
core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java

@@ -39,7 +39,7 @@ import java.util.Collections;
 /**
  * Base class for write action responses.
  */
-public abstract class ActionWriteResponse extends ActionResponse {
+public class ActionWriteResponse extends ActionResponse {
 
     public final static ActionWriteResponse.ShardInfo.Failure[] EMPTY = new ActionWriteResponse.ShardInfo.Failure[0];
 

core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.health;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Maps;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
@@ -39,6 +38,7 @@ import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
@@ -270,7 +270,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
         timedOut = in.readBoolean();
         size = in.readVInt();
         if (size == 0) {
-            validationFailures = ImmutableList.of();
+            validationFailures = Collections.emptyList();
         } else {
             for (int i = 0; i < size; i++) {
                 validationFailures.add(in.readString());

core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java

@@ -19,12 +19,10 @@
 
 package org.elasticsearch.action.admin.cluster.health;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Maps;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -33,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
@@ -179,7 +178,7 @@ public class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streama
             ClusterShardHealth shardHealth = readClusterShardHealth(in);
             shards.put(shardHealth.getId(), shardHealth);
         }
-        validationFailures = ImmutableList.copyOf(in.readStringArray());
+        validationFailures = Arrays.asList(in.readStringArray());
     }
 
     @Override

core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.get;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -27,6 +26,8 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
@@ -35,13 +36,13 @@ import java.util.List;
  */
 public class GetRepositoriesResponse extends ActionResponse implements Iterable<RepositoryMetaData> {
 
-    private ImmutableList<RepositoryMetaData> repositories = ImmutableList.of();
+    private List<RepositoryMetaData> repositories = Collections.emptyList();
 
 
     GetRepositoriesResponse() {
     }
 
-    GetRepositoriesResponse(ImmutableList<RepositoryMetaData> repositories) {
+    GetRepositoriesResponse(List<RepositoryMetaData> repositories) {
         this.repositories = repositories;
     }
 
@@ -59,7 +60,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable<
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         int size = in.readVInt();
-        ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
+        List<RepositoryMetaData> repositoryListBuilder = new ArrayList<>();
        for (int j = 0; j < size; j++) {
             repositoryListBuilder.add(new RepositoryMetaData(
                 in.readString(),
@@ -67,7 +68,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable<
                 Settings.readSettingsFromStream(in))
             );
         }
-        repositories = repositoryListBuilder.build();
+        repositories = Collections.unmodifiableList(repositoryListBuilder);
     }
 
     @Override

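This file shows the full shape of the Guava-to-JDK migration that repeats in the snapshot, status and alias classes below: accumulate into an ArrayList, then publish through Collections.unmodifiableList. One behavioral difference worth keeping in mind: unlike ImmutableList.copyOf, unmodifiableList is a read-only view, not a copy. A minimal sketch:

----------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class UnmodifiableListSketch {
    public static void main(String[] args) {
        List<String> builder = new ArrayList<>();          // was: ImmutableList.builder()
        builder.add("repo1");
        builder.add("repo2");
        List<String> published = Collections.unmodifiableList(builder);  // was: builder.build()

        System.out.println(published);  // [repo1, repo2]
        // published.add("x") would throw UnsupportedOperationException,
        // but the view still tracks the backing list:
        builder.add("repo3");
        System.out.println(published);  // [repo1, repo2, repo3]
    }
}
----------------------------------------------------------------------

That view semantics is harmless in these readFrom methods because the backing list never escapes them.
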
core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.get;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
@@ -37,6 +36,10 @@ import org.elasticsearch.repositories.RepositoryMissingException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
 /**
  * Transport action for get repositories operation
  */
@@ -71,11 +74,11 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio
             if (repositories != null) {
                 listener.onResponse(new GetRepositoriesResponse(repositories.repositories()));
             } else {
-                listener.onResponse(new GetRepositoriesResponse(ImmutableList.<RepositoryMetaData>of()));
+                listener.onResponse(new GetRepositoriesResponse(Collections.<RepositoryMetaData>emptyList()));
             }
         } else {
             if (repositories != null) {
-                ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
+                List<RepositoryMetaData> repositoryListBuilder = new ArrayList<>();
                 for (String repository : request.repositories()) {
                     RepositoryMetaData repositoryMetaData = repositories.repository(repository);
                     if (repositoryMetaData == null) {
@@ -84,7 +87,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio
                     }
                     repositoryListBuilder.add(repositoryMetaData);
                 }
-                listener.onResponse(new GetRepositoriesResponse(repositoryListBuilder.build()));
+                listener.onResponse(new GetRepositoriesResponse(Collections.unmodifiableList(repositoryListBuilder)));
             } else {
                 listener.onFailure(new RepositoryMissingException(request.repositories()[0]));
             }

core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.get;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -29,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.snapshots.SnapshotInfo;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 /**
@@ -36,12 +37,12 @@ import java.util.List;
  */
 public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
 
-    private ImmutableList<SnapshotInfo> snapshots = ImmutableList.of();
+    private List<SnapshotInfo> snapshots = Collections.emptyList();
 
     GetSnapshotsResponse() {
     }
 
-    GetSnapshotsResponse(ImmutableList<SnapshotInfo> snapshots) {
+    GetSnapshotsResponse(List<SnapshotInfo> snapshots) {
         this.snapshots = snapshots;
     }
 
@@ -58,11 +59,11 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         int size = in.readVInt();
-        ImmutableList.Builder<SnapshotInfo> builder = ImmutableList.builder();
+        List<SnapshotInfo> builder = new ArrayList<>();
         for (int i = 0; i < size; i++) {
             builder.add(SnapshotInfo.readSnapshotInfo(in));
         }
-        snapshots = builder.build();
+        snapshots = Collections.unmodifiableList(builder);
     }
 
     @Override

core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.get;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -37,6 +36,8 @@ import org.elasticsearch.snapshots.SnapshotsService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 /**
@@ -71,7 +72,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
     @Override
     protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) {
         try {
-            ImmutableList.Builder<SnapshotInfo> snapshotInfoBuilder = ImmutableList.builder();
+            List<SnapshotInfo> snapshotInfoBuilder = new ArrayList<>();
             if (isAllSnapshots(request.snapshots())) {
                 List<Snapshot> snapshots = snapshotsService.snapshots(request.repository());
                 for (Snapshot snapshot : snapshots) {
@@ -88,7 +89,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
                     snapshotInfoBuilder.add(new SnapshotInfo(snapshotsService.snapshot(snapshotId)));
                 }
             }
-            listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder.build()));
+            listener.onResponse(new GetSnapshotsResponse(Collections.unmodifiableList(snapshotInfoBuilder)));
         } catch (Throwable t) {
             listener.onFailure(t);
         }

core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.cluster.SnapshotsInProgress.State;
 import org.elasticsearch.cluster.metadata.SnapshotId;
@@ -33,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -48,7 +48,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
 
     private State state;
 
-    private ImmutableList<SnapshotIndexShardStatus> shards;
+    private List<SnapshotIndexShardStatus> shards;
 
     private ImmutableMap<String, SnapshotIndexStatus> indicesStatus;
 
@@ -57,7 +57,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
     private SnapshotStats stats;
 
 
-    SnapshotStatus(SnapshotId snapshotId, State state, ImmutableList<SnapshotIndexShardStatus> shards) {
+    SnapshotStatus(SnapshotId snapshotId, State state, List<SnapshotIndexShardStatus> shards) {
         this.snapshotId = snapshotId;
         this.state = state;
         this.shards = shards;
@@ -127,11 +127,11 @@ public class SnapshotStatus implements ToXContent, Streamable {
         snapshotId = SnapshotId.readSnapshotId(in);
         state = State.fromValue(in.readByte());
         int size = in.readVInt();
-        ImmutableList.Builder<SnapshotIndexShardStatus> builder = ImmutableList.builder();
+        List<SnapshotIndexShardStatus> builder = new ArrayList<>();
         for (int i = 0; i < size; i++) {
             builder.add(SnapshotIndexShardStatus.readShardSnapshotStatus(in));
         }
-        shards = builder.build();
+        shards = Collections.unmodifiableList(builder);
         updateShardStats();
     }
 

core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -28,18 +27,21 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 /**
  * Snapshot status response
 */
 public class SnapshotsStatusResponse extends ActionResponse implements ToXContent {
 
-    private ImmutableList<SnapshotStatus> snapshots = ImmutableList.of();
+    private List<SnapshotStatus> snapshots = Collections.emptyList();
 
     SnapshotsStatusResponse() {
     }
 
-    SnapshotsStatusResponse(ImmutableList<SnapshotStatus> snapshots) {
+    SnapshotsStatusResponse(List<SnapshotStatus> snapshots) {
         this.snapshots = snapshots;
     }
 
@@ -48,7 +50,7 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
      *
      * @return the list of snapshots
      */
-    public ImmutableList<SnapshotStatus> getSnapshots() {
+    public List<SnapshotStatus> getSnapshots() {
         return snapshots;
     }
 
@@ -56,11 +58,11 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         int size = in.readVInt();
-        ImmutableList.Builder<SnapshotStatus> builder = ImmutableList.builder();
+        List<SnapshotStatus> builder = new ArrayList<>();
         for (int i = 0; i < size; i++) {
             builder.add(SnapshotStatus.readSnapshotStatus(in));
         }
-        snapshots = builder.build();
+        snapshots = Collections.unmodifiableList(builder);
     }
 
     @Override

core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -42,6 +41,8 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -138,7 +139,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
     private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List<SnapshotsInProgress.Entry> currentSnapshots,
                                                   TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException {
         // First process snapshot that are currently processed
-        ImmutableList.Builder<SnapshotStatus> builder = ImmutableList.builder();
+        List<SnapshotStatus> builder = new ArrayList<>();
         Set<SnapshotId> currentSnapshotIds = newHashSet();
         if (!currentSnapshots.isEmpty()) {
             Map<String, TransportNodesSnapshotsStatus.NodeSnapshotStatus> nodeSnapshotStatusMap;
@@ -150,7 +151,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
 
             for (SnapshotsInProgress.Entry entry : currentSnapshots) {
                 currentSnapshotIds.add(entry.snapshotId());
-                ImmutableList.Builder<SnapshotIndexShardStatus> shardStatusBuilder = ImmutableList.builder();
+                List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
                 for (ImmutableMap.Entry<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardEntry : entry.shards().entrySet()) {
                     SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.getValue();
                     if (status.nodeId() != null) {
@@ -189,7 +190,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                     SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), stage);
                     shardStatusBuilder.add(shardStatus);
                 }
-                builder.add(new SnapshotStatus(entry.snapshotId(), entry.state(), shardStatusBuilder.build()));
+                builder.add(new SnapshotStatus(entry.snapshotId(), entry.state(), Collections.unmodifiableList(shardStatusBuilder)));
             }
         }
         // Now add snapshots on disk that are not currently running
@@ -202,7 +203,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                     continue;
                 }
                 Snapshot snapshot = snapshotsService.snapshot(snapshotId);
-                ImmutableList.Builder<SnapshotIndexShardStatus> shardStatusBuilder = ImmutableList.builder();
+                List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
                 if (snapshot.state().completed()) {
                     ImmutableMap<ShardId, IndexShardSnapshotStatus> shardStatues = snapshotsService.snapshotShards(snapshotId);
                     for (ImmutableMap.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) {
@@ -222,13 +223,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                         default:
                             throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state());
                     }
-                    builder.add(new SnapshotStatus(snapshotId, state, shardStatusBuilder.build()));
+                    builder.add(new SnapshotStatus(snapshotId, state, Collections.unmodifiableList(shardStatusBuilder)));
                 }
             }
         }
     }
 
-        return new SnapshotsStatusResponse(builder.build());
+        return new SnapshotsStatusResponse(Collections.unmodifiableList(builder));
     }
 
 }

core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java

@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.alias;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.AliasesRequest;
 import org.elasticsearch.action.CompositeIndicesRequest;
@@ -179,9 +178,9 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
             if (expandAliasesWildcards()) {
                 //for DELETE we expand the aliases
                 String[] indexAsArray = {concreteIndex};
-                ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasMetaData = metaData.findAliases(aliases, indexAsArray);
+                ImmutableOpenMap<String, List<AliasMetaData>> aliasMetaData = metaData.findAliases(aliases, indexAsArray);
                 List<String> finalAliases = new ArrayList<>();
-                for (ObjectCursor<ImmutableList<AliasMetaData>> curAliases : aliasMetaData.values()) {
+                for (ObjectCursor<List<AliasMetaData>> curAliases : aliasMetaData.values()) {
                     for (AliasMetaData aliasMeta: curAliases.value) {
                         finalAliases.add(aliasMeta.alias());
                     }

core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java

@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.alias.get;
 
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.metadata.AliasMetaData;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 /**
@@ -61,7 +61,7 @@ public class GetAliasesResponse extends ActionResponse {
             for (int j = 0; j < valueSize; j++) {
                 value.add(AliasMetaData.Builder.readFrom(in));
             }
-            aliasesBuilder.put(key, ImmutableList.copyOf(value));
+            aliasesBuilder.put(key, Collections.unmodifiableList(value));
         }
         aliases = aliasesBuilder.build();
     }

core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java

@@ -64,7 +64,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
     @Override
     protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
         String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
-        @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type
+        @SuppressWarnings("unchecked")
         ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
         listener.onResponse(new GetAliasesResponse(result));
     }

core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java

@@ -48,10 +48,10 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
     public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                      ThreadPool threadPool, MetaDataIndexStateService indexStateService,
                                      NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
-                                     IndexNameExpressionResolver indexNameExpressionResolver) {
+                                     IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
         super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest.class);
         this.indexStateService = indexStateService;
-        this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+        this.destructiveOperations = destructiveOperations;
     }
 
     @Override

core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java

@@ -48,10 +48,10 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
     public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                       ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService,
                                       NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
-                                      IndexNameExpressionResolver indexNameExpressionResolver) {
+                                      IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
         super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexRequest.class);
         this.deleteIndexService = deleteIndexService;
-        this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
+        this.destructiveOperations = destructiveOperations;
     }
 
     @Override

core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java

@@ -21,10 +21,7 @@ package org.elasticsearch.action.admin.indices.flush;
 
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
 
-import java.io.IOException;
 import java.util.List;
 
 /**
@@ -42,13 +39,4 @@ public class FlushResponse extends BroadcastResponse {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
 
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-    }
 }

core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java

@@ -19,27 +19,27 @@
 
 package org.elasticsearch.action.admin.indices.flush;
 
-import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.shard.ShardId;
 
 import java.io.IOException;
 
 /**
  *
  */
-class ShardFlushRequest extends BroadcastShardRequest {
+public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
 
     private FlushRequest request = new FlushRequest();
 
-    ShardFlushRequest() {
-    }
-
-    ShardFlushRequest(ShardId shardId, FlushRequest request) {
-        super(shardId, request);
+    public ShardFlushRequest(FlushRequest request) {
+        super(request);
         this.request = request;
     }
 
+    public ShardFlushRequest() {
+    }
+
+    FlushRequest getRequest() {
+        return request;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
@@ -53,7 +53,5 @@ class ShardFlushRequest extends BroadcastShardRequest {
         request.writeTo(out);
     }
 
-    FlushRequest getRequest() {
-        return request;
-    }
-
 }

core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java (deleted)

@@ -1,37 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.flush;
-
-import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
-import org.elasticsearch.index.shard.ShardId;
-
-/**
- *
- */
-class ShardFlushResponse extends BroadcastShardResponse {
-
-    ShardFlushResponse() {
-
-    }
-
-    ShardFlushResponse(ShardId shardId) {
-        super(shardId);
-    }
-}

core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java

@@ -19,99 +19,45 @@
 
 package org.elasticsearch.action.admin.indices.flush;
 
+import org.elasticsearch.action.ActionWriteResponse;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
+import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
 import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.block.ClusterBlockException;
-import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.GroupShardsIterator;
-import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
-import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
 
 /**
  * Flush Action.
 */
-public class TransportFlushAction extends TransportBroadcastAction<FlushRequest, FlushResponse, ShardFlushRequest, ShardFlushResponse> {
-
-    private final IndicesService indicesService;
+public class TransportFlushAction extends TransportBroadcastReplicationAction<FlushRequest, FlushResponse, ShardFlushRequest, ActionWriteResponse> {
 
     @Inject
     public TransportFlushAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
-                                TransportService transportService, IndicesService indicesService,
-                                ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, FlushAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
-                FlushRequest.class, ShardFlushRequest.class, ThreadPool.Names.FLUSH);
-        this.indicesService = indicesService;
+                                TransportService transportService, ActionFilters actionFilters,
+                                IndexNameExpressionResolver indexNameExpressionResolver,
+                                TransportShardFlushAction replicatedFlushAction) {
+        super(FlushAction.NAME, FlushRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction);
     }
 
     @Override
-    protected FlushResponse newResponse(FlushRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
-        int successfulShards = 0;
-        int failedShards = 0;
-        List<ShardOperationFailedException> shardFailures = null;
-        for (int i = 0; i < shardsResponses.length(); i++) {
-            Object shardResponse = shardsResponses.get(i);
-            if (shardResponse == null) {
-                // a non active shard, ignore
-            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
-                failedShards++;
-                if (shardFailures == null) {
-                    shardFailures = new ArrayList<>();
-                }
-                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
-            } else {
-                successfulShards++;
-            }
-        }
-        return new FlushResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+    protected ActionWriteResponse newShardResponse() {
+        return new ActionWriteResponse();
     }
 
     @Override
-    protected ShardFlushRequest newShardRequest(int numShards, ShardRouting shard, FlushRequest request) {
-        return new ShardFlushRequest(shard.shardId(), request);
+    protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardId) {
+        return new ShardFlushRequest(request).setShardId(shardId);
     }
 
     @Override
-    protected ShardFlushResponse newShardResponse() {
-        return new ShardFlushResponse();
-    }
-
-    @Override
-    protected ShardFlushResponse shardOperation(ShardFlushRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
-        indexShard.flush(request.getRequest());
-        return new ShardFlushResponse(request.shardId());
-    }
-
-    /**
-     * The refresh request works against *all* shards.
-     */
-    @Override
-    protected GroupShardsIterator shards(ClusterState clusterState, FlushRequest request, String[] concreteIndices) {
-        return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true, true);
-    }
-
-    @Override
-    protected ClusterBlockException checkGlobalBlock(ClusterState state, FlushRequest request) {
-        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
-    }
-
-    @Override
-    protected ClusterBlockException checkRequestBlock(ClusterState state, FlushRequest countRequest, String[] concreteIndices) {
-        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
+    protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
+        return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
     }
 }

@ -0,0 +1,102 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.flush;
|
||||
|
||||
import org.elasticsearch.action.ActionWriteResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.replication.TransportReplicationAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
|
||||
import org.elasticsearch.cluster.action.shard.ShardStateAction;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class TransportShardFlushAction extends TransportReplicationAction<ShardFlushRequest, ShardFlushRequest, ActionWriteResponse> {
|
||||
|
||||
public static final String NAME = FlushAction.NAME + "[s]";
|
||||
|
||||
@Inject
|
||||
public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
|
||||
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
|
||||
actionFilters, indexNameExpressionResolver, ShardFlushRequest.class, ShardFlushRequest.class, ThreadPool.Names.FLUSH);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ActionWriteResponse newResponseInstance() {
|
||||
return new ActionWriteResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Tuple<ActionWriteResponse, ShardFlushRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
|
||||
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
|
||||
indexShard.flush(shardRequest.request.getRequest());
|
||||
logger.trace("{} flush request executed on primary", indexShard.shardId());
|
||||
return new Tuple<>(new ActionWriteResponse(), shardRequest.request);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void shardOperationOnReplica(ShardId shardId, ShardFlushRequest request) {
|
||||
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
|
||||
indexShard.flush(request.getRequest());
|
||||
logger.trace("{} flush request executed on replica", indexShard.shardId());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean checkWriteConsistency() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
|
||||
return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkGlobalBlock(ClusterState state) {
|
||||
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new String[]{request.concreteIndex()});
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean shouldExecuteReplication(Settings settings) {
|
||||
return true;
|
||||
}
|
||||
}
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.admin.indices.get;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
|
@ -32,21 +31,24 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
 * A response for a get index action.
 */
public class GetIndexResponse extends ActionResponse {

    private ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
    private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
    private ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.of();
    private ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliases = ImmutableOpenMap.of();
    private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of();
    private ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
    private String[] indices;

    GetIndexResponse(String[] indices, ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers,
    GetIndexResponse(String[] indices, ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers,
                     ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings,
                     ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliases, ImmutableOpenMap<String, Settings> settings) {
                     ImmutableOpenMap<String, List<AliasMetaData>> aliases, ImmutableOpenMap<String, Settings> settings) {
        this.indices = indices;
        if (warmers != null) {
            this.warmers = warmers;
@@ -73,11 +75,11 @@ public class GetIndexResponse extends ActionResponse {
        return indices();
    }

    public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers() {
    public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
        return warmers;
    }

    public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> getWarmers() {
    public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
        return warmers();
    }

@@ -89,11 +91,11 @@ public class GetIndexResponse extends ActionResponse {
        return mappings();
    }

    public ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliases() {
    public ImmutableOpenMap<String, List<AliasMetaData>> aliases() {
        return aliases;
    }

    public ImmutableOpenMap<String, ImmutableList<AliasMetaData>> getAliases() {
    public ImmutableOpenMap<String, List<AliasMetaData>> getAliases() {
        return aliases();
    }

@@ -110,11 +112,11 @@ public class GetIndexResponse extends ActionResponse {
        super.readFrom(in);
        this.indices = in.readStringArray();
        int warmersSize = in.readVInt();
        ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> warmersMapBuilder = ImmutableOpenMap.builder();
        ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> warmersMapBuilder = ImmutableOpenMap.builder();
        for (int i = 0; i < warmersSize; i++) {
            String key = in.readString();
            int valueSize = in.readVInt();
            ImmutableList.Builder<IndexWarmersMetaData.Entry> warmerEntryBuilder = ImmutableList.builder();
            List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
            for (int j = 0; j < valueSize; j++) {
                warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
                        in.readString(),
@@ -123,7 +125,7 @@ public class GetIndexResponse extends ActionResponse {
                        in.readBytesReference())
                );
            }
            warmersMapBuilder.put(key, warmerEntryBuilder.build());
            warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
        }
        warmers = warmersMapBuilder.build();
        int mappingsSize = in.readVInt();
@@ -139,15 +141,15 @@ public class GetIndexResponse extends ActionResponse {
        }
        mappings = mappingsMapBuilder.build();
        int aliasesSize = in.readVInt();
        ImmutableOpenMap.Builder<String, ImmutableList<AliasMetaData>> aliasesMapBuilder = ImmutableOpenMap.builder();
        ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliasesMapBuilder = ImmutableOpenMap.builder();
        for (int i = 0; i < aliasesSize; i++) {
            String key = in.readString();
            int valueSize = in.readVInt();
            ImmutableList.Builder<AliasMetaData> aliasEntryBuilder = ImmutableList.builder();
            List<AliasMetaData> aliasEntryBuilder = new ArrayList<>();
            for (int j = 0; j < valueSize; j++) {
                aliasEntryBuilder.add(AliasMetaData.Builder.readFrom(in));
            }
            aliasesMapBuilder.put(key, aliasEntryBuilder.build());
            aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder));
        }
        aliases = aliasesMapBuilder.build();
        int settingsSize = in.readVInt();
@@ -164,7 +166,7 @@ public class GetIndexResponse extends ActionResponse {
        super.writeTo(out);
        out.writeStringArray(indices);
        out.writeVInt(warmers.size());
        for (ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
        for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
            out.writeString(indexEntry.key);
            out.writeVInt(indexEntry.value.size());
            for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
@@ -184,7 +186,7 @@ public class GetIndexResponse extends ActionResponse {
            }
        }
        out.writeVInt(aliases.size());
        for (ObjectObjectCursor<String, ImmutableList<AliasMetaData>> indexEntry : aliases) {
        for (ObjectObjectCursor<String, List<AliasMetaData>> indexEntry : aliases) {
            out.writeString(indexEntry.key);
            out.writeVInt(indexEntry.value.size());
            for (AliasMetaData aliasEntry : indexEntry.value) {

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.indices.get;

import com.google.common.collect.ImmutableList;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
@@ -41,6 +40,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;

/**
 * Get index action.
 */
@@ -71,9 +72,9 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
    @Override
    protected void doMasterOperation(final GetIndexRequest request, String[] concreteIndices, final ClusterState state,
                                     final ActionListener<GetIndexResponse> listener) {
        ImmutableOpenMap<String, ImmutableList<Entry>> warmersResult = ImmutableOpenMap.of();
        ImmutableOpenMap<String, List<Entry>> warmersResult = ImmutableOpenMap.of();
        ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsResult = ImmutableOpenMap.of();
        ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
        ImmutableOpenMap<String, List<AliasMetaData>> aliasesResult = ImmutableOpenMap.of();
        ImmutableOpenMap<String, Settings> settings = ImmutableOpenMap.of();
        Feature[] features = request.features();
        boolean doneAliases = false;

@@ -47,10 +47,11 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
    @Inject
    public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                    ThreadPool threadPool, MetaDataIndexStateService indexStateService,
                                    NodeSettingsService nodeSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
                                    NodeSettingsService nodeSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                    DestructiveOperations destructiveOperations) {
        super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest.class);
        this.indexStateService = indexStateService;
        this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
        this.destructiveOperations = destructiveOperations;
    }

    @Override

@@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.refresh;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 */
public class RefreshAction extends Action<RefreshRequest, RefreshResponse, RefreshRequestBuilder> {

    public static final RefreshAction INSTANCE = new RefreshAction();

@@ -33,7 +33,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 */
public class RefreshRequest extends BroadcastRequest<RefreshRequest> {


    RefreshRequest() {
    }

@@ -48,5 +47,4 @@ public class RefreshRequest extends BroadcastRequest<RefreshRequest> {
    public RefreshRequest(String... indices) {
        super(indices);
    }

}

@@ -21,34 +21,18 @@ package org.elasticsearch.action.admin.indices.refresh;

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

/**
 * The response of a refresh action.
 *
 *
 */
public class RefreshResponse extends BroadcastResponse {

    RefreshResponse() {

    }

    RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
        super(totalShards, successfulShards, failedShards, shardFailures);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
    }
}

@@ -1,37 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.refresh;

import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.index.shard.ShardId;

/**
 *
 */
class ShardRefreshRequest extends BroadcastShardRequest {

    ShardRefreshRequest() {
    }

    ShardRefreshRequest(ShardId shardId, RefreshRequest request) {
        super(shardId, request);
    }

}

@@ -1,36 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.refresh;

import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.index.shard.ShardId;

/**
 *
 */
class ShardRefreshResponse extends BroadcastShardResponse {

    ShardRefreshResponse() {
    }

    ShardRefreshResponse(ShardId shardId) {
        super(shardId);
    }
}

@@ -19,100 +19,46 @@

package org.elasticsearch.action.admin.indices.refresh;

import org.elasticsearch.action.ActionWriteResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

/**
 * Refresh action.
 */
public class TransportRefreshAction extends TransportBroadcastAction<RefreshRequest, RefreshResponse, ShardRefreshRequest, ShardRefreshResponse> {

    private final IndicesService indicesService;
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, ReplicationRequest, ActionWriteResponse> {

    @Inject
    public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                  TransportService transportService, IndicesService indicesService,
                                  ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, RefreshAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                RefreshRequest.class, ShardRefreshRequest.class, ThreadPool.Names.REFRESH);
        this.indicesService = indicesService;
                                  TransportService transportService, ActionFilters actionFilters,
                                  IndexNameExpressionResolver indexNameExpressionResolver,
                                  TransportShardRefreshAction shardRefreshAction) {
        super(RefreshAction.NAME, RefreshRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction);
    }

    @Override
    protected RefreshResponse newResponse(RefreshRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
        int successfulShards = 0;
        int failedShards = 0;
        List<ShardOperationFailedException> shardFailures = null;
        for (int i = 0; i < shardsResponses.length(); i++) {
            Object shardResponse = shardsResponses.get(i);
            if (shardResponse == null) {
                // non active shard, ignore
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = new ArrayList<>();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {
                successfulShards++;
            }
        }
        return new RefreshResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
    protected ActionWriteResponse newShardResponse() {
        return new ActionWriteResponse();
    }

    @Override
    protected ShardRefreshRequest newShardRequest(int numShards, ShardRouting shard, RefreshRequest request) {
        return new ShardRefreshRequest(shard.shardId(), request);
    protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
        return new ReplicationRequest(request).setShardId(shardId);
    }

    @Override
    protected ShardRefreshResponse newShardResponse() {
        return new ShardRefreshResponse();
    }

    @Override
    protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
        indexShard.refresh("api");
        logger.trace("{} refresh request executed", indexShard.shardId());
        return new ShardRefreshResponse(request.shardId());
    }

    /**
     * The refresh request works against *all* shards.
     */
    @Override
    protected GroupShardsIterator shards(ClusterState clusterState, RefreshRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true, true);
    }

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, RefreshRequest request) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, RefreshRequest countRequest, String[] concreteIndices) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
    protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
        return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
    }
}

@@ -0,0 +1,103 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.refresh;

import org.elasticsearch.action.ActionWriteResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 *
 */
public class TransportShardRefreshAction extends TransportReplicationAction<ReplicationRequest, ReplicationRequest, ActionWriteResponse> {

    public static final String NAME = RefreshAction.NAME + "[s]";

    @Inject
    public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                       IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
                                       MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
                                       IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
                actionFilters, indexNameExpressionResolver, ReplicationRequest.class, ReplicationRequest.class, ThreadPool.Names.REFRESH);
    }

    @Override
    protected ActionWriteResponse newResponseInstance() {
        return new ActionWriteResponse();
    }

    @Override
    protected Tuple<ActionWriteResponse, ReplicationRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
        indexShard.refresh("api");
        logger.trace("{} refresh request executed on primary", indexShard.shardId());
        return new Tuple<>(new ActionWriteResponse(), shardRequest.request);
    }

    @Override
    protected void shardOperationOnReplica(ShardId shardId, ReplicationRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id());
        indexShard.refresh("api");
        logger.trace("{} refresh request executed on replica", indexShard.shardId());
    }

    @Override
    protected boolean checkWriteConsistency() {
        return false;
    }

    @Override
    protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
        return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt();
    }

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new String[]{request.concreteIndex()});
    }

    @Override
    protected boolean shouldExecuteReplication(Settings settings) {
        return true;
    }
}

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.indices.segments;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -28,6 +27,7 @@ import org.elasticsearch.index.engine.Segment;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

@@ -91,7 +91,7 @@ public class ShardSegments implements Streamable, Iterable<Segment> {
        shardRouting = readShardRoutingEntry(in);
        int size = in.readVInt();
        if (size == 0) {
            segments = ImmutableList.of();
            segments = Collections.emptyList();
        } else {
            segments = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {

@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.shards;

import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ShardOperationFailedException;
@@ -38,6 +37,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.*;
@@ -258,15 +258,15 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
    }

    private ImmutableOpenMap<String, ImmutableOpenIntMap<List<StoreStatus>>> storeStatuses;
    private ImmutableList<Failure> failures;
    private List<Failure> failures;

    public IndicesShardStoresResponse(ImmutableOpenMap<String, ImmutableOpenIntMap<List<StoreStatus>>> storeStatuses, ImmutableList<Failure> failures) {
    public IndicesShardStoresResponse(ImmutableOpenMap<String, ImmutableOpenIntMap<List<StoreStatus>>> storeStatuses, List<Failure> failures) {
        this.storeStatuses = storeStatuses;
        this.failures = failures;
    }

    IndicesShardStoresResponse() {
        this(ImmutableOpenMap.<String, ImmutableOpenIntMap<List<StoreStatus>>>of(), ImmutableList.<Failure>of());
        this(ImmutableOpenMap.<String, ImmutableOpenIntMap<List<StoreStatus>>>of(), Collections.<Failure>emptyList());
    }

    /**
@@ -281,7 +281,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
     * Returns node {@link Failure}s encountered
     * while executing the request
     */
    public ImmutableList<Failure> getFailures() {
    public List<Failure> getFailures() {
        return failures;
    }

@@ -306,12 +306,12 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            storeStatusesBuilder.put(index, shardEntries.build());
        }
        int numFailure = in.readVInt();
        ImmutableList.Builder<Failure> failureBuilder = ImmutableList.builder();
        List<Failure> failureBuilder = new ArrayList<>();
        for (int i = 0; i < numFailure; i++) {
            failureBuilder.add(Failure.readFailure(in));
        }
        storeStatuses = storeStatusesBuilder.build();
        failures = failureBuilder.build();
        failures = Collections.unmodifiableList(failureBuilder);
    }

    @Override

@@ -18,7 +18,6 @@
 */
package org.elasticsearch.action.admin.indices.shards;

import com.google.common.collect.ImmutableList;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
@@ -34,7 +33,11 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
@@ -48,7 +51,11 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
@@ -157,7 +164,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc

        void finish() {
            ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder();
            ImmutableList.Builder<IndicesShardStoresResponse.Failure> failureBuilder = ImmutableList.builder();
            java.util.List<IndicesShardStoresResponse.Failure> failureBuilder = new ArrayList<>();
            for (Response fetchResponse : fetchResponses) {
                ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndex());
                final ImmutableOpenIntMap.Builder<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexShardsBuilder;
@@ -183,7 +190,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
                    failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), failure.getCause()));
                }
            }
            listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), failureBuilder.build()));
            listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
        }

        private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) {

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.indices.validate.query;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -27,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.elasticsearch.action.admin.indices.validate.query.QueryExplanation.readQueryExplanation;
@@ -51,7 +51,7 @@ public class ValidateQueryResponse extends BroadcastResponse {
        this.valid = valid;
        this.queryExplanations = queryExplanations;
        if (queryExplanations == null) {
            this.queryExplanations = ImmutableList.of();
            this.queryExplanations = Collections.emptyList();
        }
    }

@@ -67,7 +67,7 @@ public class ValidateQueryResponse extends BroadcastResponse {
     */
    public List<? extends QueryExplanation> getQueryExplanation() {
        if (queryExplanations == null) {
            return ImmutableList.of();
            return Collections.emptyList();
        }
        return queryExplanations;
    }

@@ -55,7 +55,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<

            @Override
            protected void doRun() throws Exception {
                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH);
                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request);
                BytesReference processedTemplate = (BytesReference) executable.run();
                RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
                response.source(processedTemplate);

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.indices.warmer.get;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.bytes.BytesReference;
@@ -30,6 +29,9 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Holds a warmer-name to a list of {@link IndexWarmersMetaData} mapping for each warmer specified
@@ -38,20 +40,20 @@ import java.io.IOException;
 */
public class GetWarmersResponse extends ActionResponse {

    private ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();
    private ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers = ImmutableOpenMap.of();

    GetWarmersResponse(ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers) {
    GetWarmersResponse(ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers) {
        this.warmers = warmers;
    }

    GetWarmersResponse() {
    }

    public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> warmers() {
    public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> warmers() {
        return warmers;
    }

    public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> getWarmers() {
    public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> getWarmers() {
        return warmers();
    }

@@ -59,11 +61,11 @@ public class GetWarmersResponse extends ActionResponse {
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder();
        ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> indexMapBuilder = ImmutableOpenMap.builder();
        for (int i = 0; i < size; i++) {
            String key = in.readString();
            int valueSize = in.readVInt();
            ImmutableList.Builder<IndexWarmersMetaData.Entry> warmerEntryBuilder = ImmutableList.builder();
            List<IndexWarmersMetaData.Entry> warmerEntryBuilder = new ArrayList<>();
            for (int j = 0; j < valueSize; j++) {
                String name = in.readString();
                String[] types = in.readStringArray();
@@ -77,7 +79,7 @@ public class GetWarmersResponse extends ActionResponse {
                        source)
                );
            }
            indexMapBuilder.put(key, warmerEntryBuilder.build());
            indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder));
        }
        warmers = indexMapBuilder.build();
    }
@@ -86,7 +88,7 @@ public class GetWarmersResponse extends ActionResponse {
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(warmers.size());
        for (ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
        for (ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> indexEntry : warmers) {
            out.writeString(indexEntry.key);
            out.writeVInt(indexEntry.value.size());
            for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.indices.warmer.get;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
@@ -35,6 +34,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;

/**
 * Internal Actions executed on the master fetching the warmer from the cluster state metadata.
 *
@@ -66,7 +67,7 @@ public class TransportGetWarmersAction extends TransportClusterInfoAction<GetWar

    @Override
    protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener<GetWarmersResponse> listener) {
        ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers(
        ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> result = state.metaData().findWarmers(
                concreteIndices, request.types(), request.warmers()
        );
        listener.onResponse(new GetWarmersResponse(result));

@@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.ArrayList;
@@ -32,8 +33,6 @@ import java.util.List;
 */
public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {

    private int shardId;

    private BulkItemRequest[] items;

    private boolean refresh;
@@ -44,7 +43,7 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
    BulkShardRequest(BulkRequest bulkRequest, String index, int shardId, boolean refresh, BulkItemRequest[] items) {
        super(bulkRequest);
        this.index = index;
        this.shardId = shardId;
        this.setShardId(new ShardId(index, shardId));
        this.items = items;
        this.refresh = refresh;
    }
@@ -53,10 +52,6 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
        return this.refresh;
    }

    int shardId() {
        return shardId;
    }

    BulkItemRequest[] items() {
        return items;
    }
@@ -75,7 +70,6 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(shardId);
        out.writeVInt(items.length);
        for (BulkItemRequest item : items) {
            if (item != null) {
@@ -91,7 +85,6 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardId = in.readVInt();
        items = new BulkItemRequest[in.readVInt()];
        for (int i = 0; i < items.length; i++) {
            if (in.readBoolean()) {

@@ -109,7 +109,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha

    @Override
    protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
        return clusterState.routingTable().index(request.concreteIndex()).shard(request.request().shardId()).shardsIt();
        return clusterState.routingTable().index(request.concreteIndex()).shard(request.request().shardId().id()).shardsIt();
    }

    @Override

@@ -94,11 +94,6 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
        }
    }

    @Override
    protected boolean resolveIndex() {
        return true;
    }

    @Override
    protected void resolveRequest(final ClusterState state, final InternalRequest request, final ActionListener<DeleteResponse> listener) {
        request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index()));

@@ -120,11 +120,6 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
        }
    }

    @Override
    protected boolean resolveIndex() {
        return true;
    }

    @Override
    protected void resolveRequest(ClusterState state, InternalRequest request, ActionListener<IndexResponse> indexResponseActionListener) {
        MetaData metaData = clusterService.state().metaData();

@@ -18,7 +18,6 @@
 */
package org.elasticsearch.action.percolate;

import com.google.common.collect.ImmutableList;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.bytes.BytesReference;
@@ -35,6 +34,7 @@ import org.elasticsearch.search.query.QuerySearchResult;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -45,7 +45,7 @@ public class PercolateShardResponse extends BroadcastShardResponse {

    private static final BytesRef[] EMPTY_MATCHES = new BytesRef[0];
    private static final float[] EMPTY_SCORES = new float[0];
    private static final List<Map<String, HighlightField>> EMPTY_HL = ImmutableList.of();
    private static final List<Map<String, HighlightField>> EMPTY_HL = Collections.emptyList();

    private long count;
    private float[] scores;

@@ -146,7 +146,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
                PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;
                return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);
            } else {
                PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults);
                PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults, request);
                long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime);
                return new PercolateResponse(
                        shardsResponses.length(), successfulShards, failedShards, shardFailures,

@@ -69,15 +69,15 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implem
    }

    public MultiSearchRequest add(byte[] data, int from, int length,
                                  @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception {
        return add(new BytesArray(data, from, length), indices, types, searchType, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true);
                                  boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception {
        return add(new BytesArray(data, from, length), isTemplateRequest, indices, types, searchType, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true);
    }

    public MultiSearchRequest add(BytesReference data, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception {
        return add(data, indices, types, searchType, null, indicesOptions, true);
    public MultiSearchRequest add(BytesReference data, boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception {
        return add(data, isTemplateRequest, indices, types, searchType, null, indicesOptions, true);
    }

    public MultiSearchRequest add(BytesReference data, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception {
    public MultiSearchRequest add(BytesReference data, boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception {
        XContent xContent = XContentFactory.xContent(data);
        int from = 0;
        int length = data.length();
@@ -146,8 +146,11 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implem
            if (nextMarker == -1) {
                break;
            }

            searchRequest.source(data.slice(from, nextMarker - from));
            if (isTemplateRequest) {
                searchRequest.templateSource(data.slice(from, nextMarker - from));
            } else {
                searchRequest.source(data.slice(from, nextMarker - from));
            }
            // move pointers
            from = nextMarker + 1;

@@ -157,15 +160,6 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implem
        return this;
    }

    private String[] parseArray(XContentParser parser) throws IOException {
        final List<String> list = new ArrayList<>();
        assert parser.currentToken() == XContentParser.Token.START_ARRAY;
        while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
            list.add(parser.text());
        }
        return list.toArray(new String[list.size()]);
    }

    private int findNextMarker(byte marker, int from, BytesReference data, int length) {
        for (int i = from; i < length; i++) {
            if (data.get(i) == marker) {

@@ -75,7 +75,8 @@ public class TransportSearchCountAction extends TransportSearchTypeAction {
        @Override
        protected void moveToSecondPhase() throws Exception {
            // no need to sort, since we know we have no hits back
            final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray<? extends FetchSearchResultProvider>) AtomicArray.empty());
            final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults,
                    (AtomicArray<? extends FetchSearchResultProvider>) AtomicArray.empty(), request);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = buildScrollId(request.searchType(), firstResults, null);

@@ -134,7 +134,8 @@ public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAc
                @Override
                public void doRun() throws IOException {
                    sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                            queryFetchResults, request);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);

@@ -20,6 +20,7 @@
package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
@@ -39,8 +40,8 @@ import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
@@ -210,7 +211,8 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
                            fetchResults, request);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);

@@ -81,7 +81,8 @@ public class TransportSearchQueryAndFetchAction extends TransportSearchTypeActio
                public void doRun() throws IOException {
                    boolean useScroll = request.scroll() != null;
                    sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, firstResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                            firstResults, request);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = buildScrollId(request.searchType(), firstResults, null);
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.search.type;

 import com.carrotsearch.hppc.IntArrayList;

+import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;

@@ -145,7 +146,8 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
             threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                 @Override
                 public void doRun() throws IOException {
-                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, fetchResults);
+                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
+                            fetchResults, request);
                     String scrollId = null;
                     if (request.scroll() != null) {
                         scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);

@@ -20,6 +20,7 @@
 package org.elasticsearch.action.search.type;

 import com.google.common.collect.ImmutableMap;

 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;

@@ -73,7 +74,8 @@ public class TransportSearchScanAction extends TransportSearchTypeAction {

         @Override
         protected void moveToSecondPhase() throws Exception {
-            final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray<? extends FetchSearchResultProvider>) AtomicArray.empty());
+            final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults,
+                    (AtomicArray<? extends FetchSearchResultProvider>) AtomicArray.empty(), request);
             String scrollId = null;
             if (request.scroll() != null) {
                 scrollId = buildScrollId(request.searchType(), firstResults, ImmutableMap.of("total_hits", Long.toString(internalResponse.hits().totalHits())));

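All of the search-type actions above receive the same treatment: `SearchPhaseController#merge(...)` now also takes the originating request, so the reduce step can consult request-scoped state instead of only the per-shard results. A minimal, hypothetical sketch of that pattern (the names below are illustrative, not Elasticsearch API):

----------------------------------------------------------
import java.util.Arrays;
import java.util.List;

final class MergeExample {
    static final class Request {
        final boolean trackTotals;
        Request(boolean trackTotals) { this.trackTotals = trackTotals; }
    }

    // before: merge(List<Integer> shardHits)
    // after:  merge(List<Integer> shardHits, Request request)
    static long merge(List<Integer> shardHits, Request request) {
        long total = 0;
        for (int hits : shardHits) {
            total += hits;
        }
        // the request is now available at reduce time
        return request.trackTotals ? total : -1;
    }

    public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(3, 5, 7), new Request(true))); // 15
    }
}
----------------------------------------------------------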
@@ -21,7 +21,11 @@ package org.elasticsearch.action.search.type;

 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;

@@ -188,7 +192,8 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent

         private void innerFinishHim() throws Exception {
             ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
-            final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults);
+            final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
+                    queryFetchResults, request);
             String scrollId = null;
             if (request.scroll() != null) {
                 scrollId = request.scrollId();

@@ -20,9 +20,14 @@
 package org.elasticsearch.action.search.type;

 import com.carrotsearch.hppc.IntArrayList;

+import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.search.ReduceSearchPhaseException;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;

@@ -239,7 +244,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
         }

         private void innerFinishHim() {
-            InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
+            InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults, request);
             String scrollId = null;
             if (request.scroll() != null) {
                 scrollId = request.scrollId();

@@ -212,7 +212,8 @@ public class TransportSearchScrollScanAction extends AbstractComponent {
                     docs.add(scoreDoc);
                 }
             }
-            final InternalSearchResponse internalResponse = searchPhaseController.merge(docs.toArray(new ScoreDoc[0]), queryFetchResults, queryFetchResults);
+            final InternalSearchResponse internalResponse = searchPhaseController.merge(docs.toArray(new ScoreDoc[0]), queryFetchResults,
+                    queryFetchResults, request);
             ((InternalSearchHits) internalResponse.hits()).totalHits = Long.parseLong(this.scrollId.getAttributes().get("total_hits"));


@@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
                 throw new IllegalArgumentException("suggest content missing");
             }
             final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
-                    indexService.queryParserService(), request.shardId().getIndex(), request.shardId().id());
+                    indexService.queryParserService(), request.shardId().getIndex(), request.shardId().id(), request);
             final Suggest result = suggestPhase.execute(context, searcher.searcher());
             return new ShardSuggestResponse(request.shardId(), result);
         }

@@ -19,28 +19,25 @@

 package org.elasticsearch.action.support;

-import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.node.settings.NodeSettingsService;

 /**
  * Helper for dealing with destructive operations and wildcard usage.
  */
-public final class DestructiveOperations implements NodeSettingsService.Listener {
+public final class DestructiveOperations extends AbstractComponent implements NodeSettingsService.Listener {

     /**
      * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
      */
     public static final String REQUIRES_NAME = "action.destructive_requires_name";

-    private final ESLogger logger;
     private volatile boolean destructiveRequiresName;

-    // TODO: Turn into a component that can be reused and wired up into all the transport actions where
-    // this helper logic is required. Note: also added the logger as argument, otherwise the same log
-    // statement is printed several times, this can removed once this becomes a component.
-    public DestructiveOperations(ESLogger logger, Settings settings, NodeSettingsService nodeSettingsService) {
-        this.logger = logger;
+    @Inject
+    public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) {
+        super(settings);
         destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false);
         nodeSettingsService.addListener(this);
     }

@@ -70,7 +67,7 @@ public final class DestructiveOperations implements NodeSettingsService.Listener

     @Override
     public void onRefreshSettings(Settings settings) {
-        boolean newValue = settings.getAsBoolean("action.destructive_requires_name", destructiveRequiresName);
+        boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName);
         if (destructiveRequiresName != newValue) {
             logger.info("updating [action.operate_all_indices] from [{}] to [{}]", destructiveRequiresName, newValue);
             this.destructiveRequiresName = newValue;

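A minimal, self-contained sketch of the pattern DestructiveOperations uses: the component caches a dynamic setting in a volatile field and refreshes it from a settings-update callback. The Listener shape and the Settings map below are stand-ins, not the Elasticsearch classes:

----------------------------------------------------------
import java.util.Map;

final class DestructiveFlag {
    static final String REQUIRES_NAME = "action.destructive_requires_name";

    private volatile boolean destructiveRequiresName;

    DestructiveFlag(Map<String, String> settings) {
        destructiveRequiresName = Boolean.parseBoolean(settings.getOrDefault(REQUIRES_NAME, "false"));
    }

    // invoked by the settings service whenever dynamic settings change
    void onRefreshSettings(Map<String, String> settings) {
        boolean newValue = Boolean.parseBoolean(
                settings.getOrDefault(REQUIRES_NAME, Boolean.toString(destructiveRequiresName)));
        if (destructiveRequiresName != newValue) {
            destructiveRequiresName = newValue; // volatile write publishes to readers
        }
    }

    boolean requiresName() {
        return destructiveRequiresName; // volatile read, no locking required
    }
}
----------------------------------------------------------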
@@ -25,18 +25,20 @@ import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;

 import java.io.IOException;
+import java.util.concurrent.TimeUnit;

 /**
  *
  */
-public abstract class BroadcastRequest<T extends BroadcastRequest> extends ActionRequest<T> implements IndicesRequest.Replaceable {
+public class BroadcastRequest<T extends BroadcastRequest> extends ActionRequest<T> implements IndicesRequest.Replaceable {

     protected String[] indices;
     private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();

-    protected BroadcastRequest() {
+    public BroadcastRequest() {

     }


@@ -32,17 +32,17 @@ import static org.elasticsearch.action.support.DefaultShardOperationFailedExcept
 /**
  * Base class for all broadcast operation based responses.
  */
-public abstract class BroadcastResponse extends ActionResponse {
+public class BroadcastResponse extends ActionResponse {
     private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
     private int totalShards;
     private int successfulShards;
     private int failedShards;
     private ShardOperationFailedException[] shardFailures = EMPTY;

-    protected BroadcastResponse() {
+    public BroadcastResponse() {
     }

-    protected BroadcastResponse(int totalShards, int successfulShards, int failedShards, List<? extends ShardOperationFailedException> shardFailures) {
+    public BroadcastResponse(int totalShards, int successfulShards, int failedShards, List<? extends ShardOperationFailedException> shardFailures) {
         this.totalShards = totalShards;
         this.successfulShards = successfulShards;
         this.failedShards = failedShards;

@@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.WriteConsistencyLevel;
 import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;

@@ -37,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  *
  */
-public abstract class ReplicationRequest<T extends ReplicationRequest> extends ActionRequest<T> implements IndicesRequest {
+public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequest<T> implements IndicesRequest {

     public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);

@@ -49,14 +50,14 @@ public abstract class ReplicationRequest<T extends ReplicationRequest> extends A
     private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
     private volatile boolean canHaveDuplicates = false;

-    protected ReplicationRequest() {
+    public ReplicationRequest() {

     }

     /**
      * Creates a new request that inherits headers and context from the request provided as argument.
      */
-    protected ReplicationRequest(ActionRequest request) {
+    public ReplicationRequest(ActionRequest request) {
         super(request);
     }

@@ -133,6 +134,16 @@ public abstract class ReplicationRequest<T extends ReplicationRequest> extends A
         return this.consistencyLevel;
     }

+    /**
+     * @return the shardId of the shard where this operation should be executed on.
+     * can be null in case the shardId is determined by a single document (index, type, id) for example for index or delete request.
+     */
+    public
+    @Nullable
+    ShardId shardId() {
+        return internalShardId;
+    }
+
     /**
      * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
      */

@@ -173,4 +184,10 @@ public abstract class ReplicationRequest<T extends ReplicationRequest> extends A
         out.writeString(index);
         out.writeBoolean(canHaveDuplicates);
     }
+
+    public T setShardId(ShardId shardId) {
+        this.internalShardId = shardId;
+        this.index = shardId.getIndex();
+        return (T) this;
+    }
 }

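The `return (T) this;` in the new setShardId relies on the recursive-generic ("self type") idiom, so subclasses keep their own type through chained setters. A sketch with illustrative names:

----------------------------------------------------------
class BaseRequest<T extends BaseRequest<T>> {
    private String index;

    @SuppressWarnings("unchecked")
    public T index(String index) {
        this.index = index;
        return (T) this; // safe as long as subclasses declare: class Sub extends BaseRequest<Sub>
    }

    public String index() {
        return index;
    }
}

class DeleteRequest extends BaseRequest<DeleteRequest> {
    private String id;

    public DeleteRequest id(String id) {
        this.id = id;
        return this;
    }
}

// usage: the chain stays typed as DeleteRequest
// DeleteRequest r = new DeleteRequest().index("idx").id("1");
----------------------------------------------------------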
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionWriteResponse;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.broadcast.BroadcastRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * Base class for requests that should be executed on all shards of an index or several indices.
+ * This action sends shard requests to all primary shards of the indices and they are then replicated like write requests
+ */
+public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest, ShardResponse extends ActionWriteResponse> extends HandledTransportAction<Request, Response> {
+
+    private final TransportReplicationAction replicatedBroadcastShardAction;
+    private final ClusterService clusterService;
+
+    public TransportBroadcastReplicationAction(String name, Class<Request> request, Settings settings, ThreadPool threadPool, ClusterService clusterService,
+                                               TransportService transportService,
+                                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) {
+        super(settings, name, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
+        this.replicatedBroadcastShardAction = replicatedBroadcastShardAction;
+        this.clusterService = clusterService;
+    }
+
+    @Override
+    protected void doExecute(final Request request, final ActionListener<Response> listener) {
+        final ClusterState clusterState = clusterService.state();
+        List<ShardId> shards = shards(request, clusterState);
+        final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList();
+        if (shards.size() == 0) {
+            finishAndNotifyListener(listener, shardsResponses);
+        }
+        final CountDown responsesCountDown = new CountDown(shards.size());
+        for (final ShardId shardId : shards) {
+            ActionListener<ShardResponse> shardActionListener = new ActionListener<ShardResponse>() {
+                @Override
+                public void onResponse(ShardResponse shardResponse) {
+                    shardsResponses.add(shardResponse);
+                    logger.trace("{}: got response from {}", actionName, shardId);
+                    if (responsesCountDown.countDown()) {
+                        finishAndNotifyListener(listener, shardsResponses);
+                    }
+                }
+
+                @Override
+                public void onFailure(Throwable e) {
+                    logger.trace("{}: got failure from {}", actionName, shardId);
+                    int totalNumCopies = clusterState.getMetaData().index(shardId.index().getName()).getNumberOfReplicas() + 1;
+                    ShardResponse shardResponse = newShardResponse();
+                    ActionWriteResponse.ShardInfo.Failure[] failures;
+                    if (ExceptionsHelper.unwrap(e, UnavailableShardsException.class) != null) {
+                        failures = new ActionWriteResponse.ShardInfo.Failure[0];
+                    } else {
+                        ActionWriteResponse.ShardInfo.Failure failure = new ActionWriteResponse.ShardInfo.Failure(shardId.index().name(), shardId.id(), null, e, ExceptionsHelper.status(e), true);
+                        failures = new ActionWriteResponse.ShardInfo.Failure[totalNumCopies];
+                        Arrays.fill(failures, failure);
+                    }
+                    shardResponse.setShardInfo(new ActionWriteResponse.ShardInfo(totalNumCopies, 0, failures));
+                    shardsResponses.add(shardResponse);
+                    if (responsesCountDown.countDown()) {
+                        finishAndNotifyListener(listener, shardsResponses);
+                    }
+                }
+            };
+            shardExecute(request, shardId, shardActionListener);
+        }
+    }
+
+    protected void shardExecute(Request request, ShardId shardId, ActionListener<ShardResponse> shardActionListener) {
+        replicatedBroadcastShardAction.execute(newShardRequest(request, shardId), shardActionListener);
+    }
+
+    /**
+     * @return all shard ids the request should run on
+     */
+    protected List<ShardId> shards(Request request, ClusterState clusterState) {
+        List<ShardId> shardIds = new ArrayList<>();
+        String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
+        for (String index : concreteIndices) {
+            IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index);
+            if (indexMetaData != null) {
+                for (IntObjectCursor<IndexShardRoutingTable> shardRouting : clusterState.getRoutingTable().indicesRouting().get(index).getShards()) {
+                    shardIds.add(shardRouting.value.shardId());
+                }
+            }
+        }
+        return shardIds;
+    }
+
+    protected abstract ShardResponse newShardResponse();
+
+    protected abstract ShardRequest newShardRequest(Request request, ShardId shardId);
+
+    private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayList<ShardResponse> shardsResponses) {
+        logger.trace("{}: got all shard responses", actionName);
+        int successfulShards = 0;
+        int failedShards = 0;
+        int totalNumCopies = 0;
+        List<ShardOperationFailedException> shardFailures = null;
+        for (int i = 0; i < shardsResponses.size(); i++) {
+            ActionWriteResponse shardResponse = shardsResponses.get(i);
+            if (shardResponse == null) {
+                // non active shard, ignore
+            } else {
+                failedShards += shardResponse.getShardInfo().getFailed();
+                successfulShards += shardResponse.getShardInfo().getSuccessful();
+                totalNumCopies += shardResponse.getShardInfo().getTotal();
+                if (shardFailures == null) {
+                    shardFailures = new ArrayList<>();
+                }
+                for (ActionWriteResponse.ShardInfo.Failure failure : shardResponse.getShardInfo().getFailures()) {
+                    shardFailures.add(new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(new ShardId(failure.index(), failure.shardId()), failure.getCause())));
+                }
+            }
+        }
+        listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures));
+    }
+
+    protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures);
+}

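The coordination in doExecute above is a classic fan-out/aggregate: one async task per shard, responses collected in a thread-safe list, and exactly one callback (the one that brings the counter to zero) assembles the final response. A self-contained sketch of the same pattern with plain JDK types (AtomicInteger stands in for Elasticsearch's CountDown, whose countDown() returns true exactly once):

----------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;

final class FanOut {
    public static void main(String[] args) {
        List<Integer> shards = Arrays.asList(0, 1, 2);
        CopyOnWriteArrayList<String> responses = new CopyOnWriteArrayList<>();
        AtomicInteger remaining = new AtomicInteger(shards.size());
        for (int shard : shards) {
            new Thread(() -> {
                responses.add("response from shard " + shard);
                if (remaining.decrementAndGet() == 0) {
                    // exactly one thread reaches zero; finish and notify the listener
                    System.out.println("all " + responses.size() + " shard responses in");
                }
            }).start();
        }
    }
}
----------------------------------------------------------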
@@ -142,7 +142,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex());
     }

-    protected abstract boolean resolveIndex();
+    protected boolean resolveIndex() {
+        return true;
+    }

     /**
      * Resolves the request, by default doing nothing. Can be subclassed to do

@@ -360,6 +362,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 finishWithUnexpectedFailure(e);
             }

+            @Override
             protected void doRun() {
                 if (checkBlocks() == false) {
                     return;

@@ -725,7 +728,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             // new primary shard as well...
             ClusterState newState = clusterService.state();

-            int numberOfUnassignedOrShadowReplicas = 0;
+            int numberOfUnassignedOrIgnoredReplicas = 0;
             int numberOfPendingShardInstances = 0;
             if (observer.observedState() != newState) {
                 observer.reset(newState);

@@ -739,7 +742,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                     if (shard.relocating()) {
                         numberOfPendingShardInstances++;
                     }
-                } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) {
+                } else if (shouldExecuteReplication(indexMetaData.settings()) == false) {
                     // If the replicas use shadow replicas, there is no reason to
                     // perform the action on the replica, so skip it and
                     // immediately return

@@ -748,9 +751,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                     // to wait until they get the new mapping through the cluster
                     // state, which is why we recommend pre-defined mappings for
                     // indices using shadow replicas
-                    numberOfUnassignedOrShadowReplicas++;
+                    numberOfUnassignedOrIgnoredReplicas++;
                 } else if (shard.unassigned()) {
-                    numberOfUnassignedOrShadowReplicas++;
+                    numberOfUnassignedOrIgnoredReplicas++;
                 } else if (shard.relocating()) {
                     // we need to send to two copies
                     numberOfPendingShardInstances += 2;

@@ -767,13 +770,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                     replicaRequest.setCanHaveDuplicates();
                 }
                 if (shard.unassigned()) {
-                    numberOfUnassignedOrShadowReplicas++;
+                    numberOfUnassignedOrIgnoredReplicas++;
                 } else if (shard.primary()) {
                     if (shard.relocating()) {
                         // we have to replicate to the other copy
                         numberOfPendingShardInstances += 1;
                     }
-                } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) {
+                } else if (shouldExecuteReplication(indexMetaData.settings()) == false) {
                     // If the replicas use shadow replicas, there is no reason to
                     // perform the action on the replica, so skip it and
                     // immediately return

@@ -782,7 +785,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                     // to wait until they get the new mapping through the cluster
                     // state, which is why we recommend pre-defined mappings for
                     // indices using shadow replicas
-                    numberOfUnassignedOrShadowReplicas++;
+                    numberOfUnassignedOrIgnoredReplicas++;
                 } else if (shard.relocating()) {
                     // we need to send to two copies
                     numberOfPendingShardInstances += 2;

@@ -793,7 +796,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             }

             // one for the primary already done
-            this.totalShards = 1 + numberOfPendingShardInstances + numberOfUnassignedOrShadowReplicas;
+            this.totalShards = 1 + numberOfPendingShardInstances + numberOfUnassignedOrIgnoredReplicas;
             this.pending = new AtomicInteger(numberOfPendingShardInstances);
         }

@@ -852,7 +855,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 if (shard.relocating()) {
                     performOnReplica(shard, shard.relocatingNodeId());
                 }
-            } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings()) == false) {
+            } else if (shouldExecuteReplication(indexMetaData.settings())) {
                 performOnReplica(shard, shard.currentNodeId());
                 if (shard.relocating()) {
                     performOnReplica(shard, shard.relocatingNodeId());

@@ -983,6 +986,14 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

     }

+    /**
+     * Indicates whether this operation should be replicated to shadow replicas or not. If this method returns false the replication phase will be skipped.
+     * For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do.
+     */
+    protected boolean shouldExecuteReplication(Settings settings) {
+        return IndexMetaData.isIndexUsingShadowReplicas(settings) == false;
+    }
+
     /**
      * Internal request class that gets built on each node. Holds the original request plus additional info.
      */

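The new shouldExecuteReplication(...) hook is a template-method refactoring: the hard-coded shadow-replica test becomes an overridable predicate, so actions like refresh and flush can opt back into replication. A generic sketch of the shape (names are illustrative):

----------------------------------------------------------
abstract class ReplicationPhase {
    void replicateTo(String replica, boolean usesShadowReplicas) {
        if (shouldExecuteReplication(usesShadowReplicas)) {
            send(replica);
        } // otherwise the replica is counted as ignored, not failed
    }

    // default keeps the old behavior; subclasses such as refresh/flush override it
    protected boolean shouldExecuteReplication(boolean usesShadowReplicas) {
        return usesShadowReplicas == false;
    }

    abstract void send(String replica);
}
----------------------------------------------------------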
@@ -336,7 +336,7 @@ public final class TermVectorsFields extends Fields {
         }

         @Override
-        public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+        public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
             final TermVectorPostingsEnum retVal = (reuse instanceof TermVectorPostingsEnum ? (TermVectorPostingsEnum) reuse
                     : new TermVectorPostingsEnum());
             return retVal.reset(hasPositions ? positions : null, hasOffsets ? startOffsets : null, hasOffsets ? endOffsets

@@ -286,7 +286,7 @@ public class TermVectorsFilter {
     }

     private int getTermFreq(TermsEnum termsEnum, PostingsEnum docsEnum) throws IOException {
-        docsEnum = termsEnum.postings(null, docsEnum);
+        docsEnum = termsEnum.postings(docsEnum);
         docsEnum.nextDoc();
         return docsEnum.freq();
     }

@@ -220,7 +220,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent {
             builder.startObject(spare.toString());
             buildTermStatistics(builder, termIter);
             // finally write the term vectors
-            PostingsEnum posEnum = termIter.postings(null, null, PostingsEnum.ALL);
+            PostingsEnum posEnum = termIter.postings(null, PostingsEnum.ALL);
             int termFreq = posEnum.freq();
             builder.field(FieldStrings.TERM_FREQ, termFreq);
             initMemory(curTerms, termFreq);

@@ -151,7 +151,7 @@ final class TermVectorsWriter {
     }

     private PostingsEnum writeTermWithDocsOnly(TermsEnum iterator, PostingsEnum docsEnum) throws IOException {
-        docsEnum = iterator.postings(null, docsEnum);
+        docsEnum = iterator.postings(docsEnum);
         int nextDoc = docsEnum.nextDoc();
         assert nextDoc != DocIdSetIterator.NO_MORE_DOCS;
         writeFreq(docsEnum.freq());

@@ -162,7 +162,7 @@

     private PostingsEnum writeTermWithDocsAndPos(TermsEnum iterator, PostingsEnum docsAndPosEnum, boolean positions,
                                                  boolean offsets, boolean payloads) throws IOException {
-        docsAndPosEnum = iterator.postings(null, docsAndPosEnum, PostingsEnum.ALL);
+        docsAndPosEnum = iterator.postings(docsAndPosEnum, PostingsEnum.ALL);
        // for each term (iterator next) in this field (field)
        // iterate over the docs (should only be one)
         int nextDoc = docsAndPosEnum.nextDoc();

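The four term-vector changes above all track the same Lucene 5 API change: the live-docs argument was dropped from TermsEnum, so `postings(null, enum, flags)` becomes `postings(enum, flags)`. A minimal sketch against the Lucene 5.x API (the index setup is illustrative):

----------------------------------------------------------
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

public class PostingsDemo {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        Document doc = new Document();
        doc.add(new TextField("body", "hello hello world", Field.Store.NO));
        writer.addDocument(doc);
        writer.close();

        try (IndexReader reader = DirectoryReader.open(dir)) {
            Terms terms = MultiFields.getTerms(reader, "body");
            TermsEnum termsEnum = terms.iterator();
            PostingsEnum postings = null;
            BytesRef term;
            while ((term = termsEnum.next()) != null) {
                // new signature: no Bits liveDocs argument, the reuse enum comes first
                postings = termsEnum.postings(postings, PostingsEnum.FREQS);
                postings.nextDoc();
                System.out.println(term.utf8ToString() + " freq=" + postings.freq());
            }
        }
    }
}
----------------------------------------------------------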
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.update;

-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;

@@ -58,6 +57,7 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

+import java.util.Collections;
 import java.util.Map;

 /**

@@ -153,10 +153,10 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
         ShardRouting shard;
         while ((shard = shardIterator.nextOrNull()) != null) {
             if (shard.primary()) {
-                return new PlainShardIterator(shardIterator.shardId(), ImmutableList.of(shard));
+                return new PlainShardIterator(shardIterator.shardId(), Collections.singletonList(shard));
             }
         }
-        return new PlainShardIterator(shardIterator.shardId(), ImmutableList.<ShardRouting>of());
+        return new PlainShardIterator(shardIterator.shardId(), Collections.<ShardRouting>emptyList());
     }

     @Override

@@ -246,7 +246,7 @@ public class UpdateHelper extends AbstractComponent {
     private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) {
         try {
             if (scriptService != null) {
-                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE);
+                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request);
                 script.setNextVar("ctx", ctx);
                 script.run();
                 // we need to unwrap the ctx...

@@ -22,6 +22,8 @@ package org.elasticsearch.bootstrap;
 import org.elasticsearch.common.SuppressForbidden;

 import java.net.URI;
+import java.net.URL;
+import java.security.CodeSource;
 import java.security.Permission;
 import java.security.PermissionCollection;
 import java.security.Policy;

@@ -44,11 +46,22 @@ final class ESPolicy extends Policy {
     }

     @Override @SuppressForbidden(reason = "fast equals check is desired")
-    public boolean implies(ProtectionDomain domain, Permission permission) {
-        // run groovy scripts with no permissions
-        if ("/groovy/script".equals(domain.getCodeSource().getLocation().getFile())) {
-            return false;
+    public boolean implies(ProtectionDomain domain, Permission permission) {
+        CodeSource codeSource = domain.getCodeSource();
+        // codesource can be null when reducing privileges via doPrivileged()
+        if (codeSource != null) {
+            URL location = codeSource.getLocation();
+            // location can be null... ??? nobody knows
+            // https://bugs.openjdk.java.net/browse/JDK-8129972
+            if (location != null) {
+                // run groovy scripts with no permissions
+                if ("/groovy/script".equals(location.getFile())) {
+                    return false;
+                }
+            }
         }

         // otherwise defer to template + dynamic file permissions
         return template.implies(domain, permission) || dynamic.implies(permission);
     }
 }

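A runnable sketch of the null-safety fix above, outside Elasticsearch: both ProtectionDomain.getCodeSource() and CodeSource.getLocation() may return null, so each must be checked before dereferencing. The fallback policy here is an assumed stand-in for the template/dynamic permission check:

----------------------------------------------------------
import java.net.URL;
import java.security.CodeSource;
import java.security.Permission;
import java.security.Policy;
import java.security.ProtectionDomain;

final class NullSafePolicy extends Policy {
    private final Policy fallback;

    NullSafePolicy(Policy fallback) {
        this.fallback = fallback;
    }

    @Override
    public boolean implies(ProtectionDomain domain, Permission permission) {
        CodeSource codeSource = domain.getCodeSource();   // may be null (e.g. doPrivileged)
        if (codeSource != null) {
            URL location = codeSource.getLocation();      // may also be null (JDK-8129972)
            if (location != null && "/groovy/script".equals(location.getFile())) {
                return false; // deny everything to dynamically generated script code
            }
        }
        return fallback.implies(domain, permission);
    }
}
----------------------------------------------------------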
@@ -19,7 +19,6 @@

 package org.elasticsearch.bootstrap;

-import com.google.common.collect.ImmutableList;
 import com.sun.jna.*;
 import com.sun.jna.win32.StdCallLibrary;


@@ -29,6 +28,7 @@ import org.elasticsearch.common.logging.Loggers;

 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;


@@ -85,8 +85,8 @@ final class JNAKernel32Library {
         return result;
     }

-    ImmutableList<Object> getCallbacks() {
-        return ImmutableList.builder().addAll(callbacks).build();
+    List<Object> getCallbacks() {
+        return Collections.<Object>unmodifiableList(callbacks);
     }

     /**

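One behavioral nuance of the getCallbacks change: Guava's `ImmutableList.builder().addAll(x).build()` returns a snapshot copy, while `Collections.unmodifiableList(x)` returns a read-only *view* that still reflects later mutations of the backing list. A short demonstration:

----------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ViewVsCopy {
    public static void main(String[] args) {
        List<Object> callbacks = new ArrayList<>();
        callbacks.add("first");

        List<Object> view = Collections.unmodifiableList(callbacks);
        List<Object> copy = new ArrayList<>(callbacks); // snapshot, like the old Guava code

        callbacks.add("second");
        System.out.println(view.size()); // 2 -- the view tracks the source
        System.out.println(copy.size()); // 1 -- the copy does not
        // view.add("third") would throw UnsupportedOperationException
    }
}
----------------------------------------------------------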
@@ -20,7 +20,6 @@
 package org.elasticsearch.client.transport;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;

@@ -50,6 +49,7 @@ import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;

+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;

@@ -83,12 +83,12 @@ public class TransportClientNodesService extends AbstractComponent {
     private final Headers headers;

     // nodes that are added to be discovered
-    private volatile ImmutableList<DiscoveryNode> listedNodes = ImmutableList.of();
+    private volatile List<DiscoveryNode> listedNodes = Collections.emptyList();

     private final Object mutex = new Object();

-    private volatile List<DiscoveryNode> nodes = ImmutableList.of();
-    private volatile List<DiscoveryNode> filteredNodes = ImmutableList.of();
+    private volatile List<DiscoveryNode> nodes = Collections.emptyList();
+    private volatile List<DiscoveryNode> filteredNodes = Collections.emptyList();

     private final AtomicInteger tempNodeIdGenerator = new AtomicInteger();

@@ -129,11 +129,11 @@ public class TransportClientNodesService extends AbstractComponent {
     }

     public List<TransportAddress> transportAddresses() {
-        ImmutableList.Builder<TransportAddress> lstBuilder = ImmutableList.builder();
+        List<TransportAddress> lstBuilder = new ArrayList<>();
         for (DiscoveryNode listedNode : listedNodes) {
             lstBuilder.add(listedNode.address());
         }
-        return lstBuilder.build();
+        return Collections.unmodifiableList(lstBuilder);
     }

     public List<DiscoveryNode> connectedNodes() {

@@ -170,14 +170,14 @@ public class TransportClientNodesService extends AbstractComponent {
             if (filtered.isEmpty()) {
                 return this;
             }
-            ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder();
+            List<DiscoveryNode> builder = new ArrayList<>();
             builder.addAll(listedNodes());
             for (TransportAddress transportAddress : filtered) {
                 DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(), transportAddress, minCompatibilityVersion);
                 logger.debug("adding address [{}]", node);
                 builder.add(node);
             }
-            listedNodes = builder.build();
+            listedNodes = Collections.unmodifiableList(builder);
             nodesSampler.sample();
         }
         return this;

@@ -188,7 +188,7 @@ public class TransportClientNodesService extends AbstractComponent {
             if (closed) {
                 throw new IllegalStateException("transport client is closed, can't remove an address");
             }
-            ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder();
+            List<DiscoveryNode> builder = new ArrayList<>();
             for (DiscoveryNode otherNode : listedNodes) {
                 if (!otherNode.address().equals(transportAddress)) {
                     builder.add(otherNode);

@@ -196,7 +196,7 @@ public class TransportClientNodesService extends AbstractComponent {
                     logger.debug("removing address [{}]", otherNode);
                 }
             }
-            listedNodes = builder.build();
+            listedNodes = Collections.unmodifiableList(builder);
             nodesSampler.sample();
         }
         return this;

@@ -271,7 +271,7 @@ public class TransportClientNodesService extends AbstractComponent {
             for (DiscoveryNode listedNode : listedNodes) {
                 transportService.disconnectFromNode(listedNode);
             }
-            nodes = ImmutableList.of();
+            nodes = Collections.emptyList();
         }
     }

@@ -321,7 +321,7 @@ public class TransportClientNodesService extends AbstractComponent {
                 }
             }

-            return new ImmutableList.Builder<DiscoveryNode>().addAll(nodes).build();
+            return Collections.unmodifiableList(new ArrayList<>(nodes));
         }

 }

@@ -386,7 +386,7 @@ public class TransportClientNodesService extends AbstractComponent {
             }

             nodes = validateNewNodes(newNodes);
-            filteredNodes = ImmutableList.copyOf(newFilteredNodes);
+            filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes));
         }
     }

@@ -486,7 +486,7 @@ public class TransportClientNodesService extends AbstractComponent {
             }

             nodes = validateNewNodes(newNodes);
-            filteredNodes = ImmutableList.copyOf(newFilteredNodes);
+            filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes));
         }
     }

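The node lists above follow a hand-rolled copy-on-write idiom: build a fresh ArrayList under the mutex, wrap it unmodifiable, and publish it with a single volatile write, so readers never lock. A self-contained sketch of the same design:

----------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class NodeRegistry {
    private final Object mutex = new Object();
    private volatile List<String> nodes = Collections.emptyList();

    void addNode(String node) {
        synchronized (mutex) {
            List<String> builder = new ArrayList<>(nodes); // copy the current snapshot
            builder.add(node);
            nodes = Collections.unmodifiableList(builder); // volatile write publishes it
        }
    }

    List<String> nodes() {
        return nodes; // safe to iterate: a published snapshot never changes
    }
}
----------------------------------------------------------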
@@ -20,13 +20,13 @@
 package org.elasticsearch.cluster;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;

 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;

 /**

@@ -86,7 +86,7 @@ public class ClusterChangedEvent {
             return Arrays.asList(state.metaData().indices().keys().toArray(String.class));
         }
         if (!metaDataChanged()) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         List<String> created = null;
         for (ObjectCursor<String> cursor : state.metaData().indices().keys()) {

@@ -98,7 +98,7 @@ public class ClusterChangedEvent {
                 created.add(index);
             }
         }
-        return created == null ? ImmutableList.<String>of() : created;
+        return created == null ? Collections.<String>emptyList() : created;
     }

     /**

@@ -116,10 +116,10 @@ public class ClusterChangedEvent {
         // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and
         // https://github.com/elastic/elasticsearch/issues/11665
         if (hasNewMaster() || previousState == null) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         if (!metaDataChanged()) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         List<String> deleted = null;
         for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) {

@@ -131,7 +131,7 @@ public class ClusterChangedEvent {
                 deleted.add(index);
             }
         }
-        return deleted == null ? ImmutableList.<String>of() : deleted;
+        return deleted == null ? Collections.<String>emptyList() : deleted;
     }

     public boolean metaDataChanged() {

@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.cluster.ClusterState.Custom;
 import org.elasticsearch.cluster.metadata.SnapshotId;

@@ -30,6 +29,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;

@@ -42,14 +44,14 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo

     public static final RestoreInProgress PROTO = new RestoreInProgress();

-    private final ImmutableList<Entry> entries;
+    private final List<Entry> entries;

     /**
      * Constructs new restore metadata
      *
      * @param entries list of currently running restore processes
      */
-    public RestoreInProgress(ImmutableList<Entry> entries) {
+    public RestoreInProgress(List<Entry> entries) {
         this.entries = entries;
     }

@@ -59,7 +61,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
      * @param entries list of currently running restore processes
      */
     public RestoreInProgress(Entry... entries) {
-        this.entries = ImmutableList.copyOf(entries);
+        this.entries = Arrays.asList(entries);
     }

     /**

@@ -111,7 +113,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
         private final State state;
         private final SnapshotId snapshotId;
         private final ImmutableMap<ShardId, ShardRestoreStatus> shards;
-        private final ImmutableList<String> indices;
+        private final List<String> indices;

         /**
          * Creates new restore metadata

@@ -121,7 +123,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
          * @param indices list of indices being restored
          * @param shards  list of shards being restored and their current restore status
          */
-        public Entry(SnapshotId snapshotId, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardRestoreStatus> shards) {
+        public Entry(SnapshotId snapshotId, State state, List<String> indices, ImmutableMap<ShardId, ShardRestoreStatus> shards) {
             this.snapshotId = snapshotId;
             this.state = state;
             this.indices = indices;

@@ -164,7 +166,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
          *
          * @return list of indices
          */
-        public ImmutableList<String> indices() {
+        public List<String> indices() {
             return indices;
         }

@@ -413,7 +415,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
             SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
             State state = State.fromValue(in.readByte());
             int indices = in.readVInt();
-            ImmutableList.Builder<String> indexBuilder = ImmutableList.builder();
+            List<String> indexBuilder = new ArrayList<>();
             for (int j = 0; j < indices; j++) {
                 indexBuilder.add(in.readString());
             }

@@ -424,7 +426,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
                 ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in);
                 builder.put(shardId, shardState);
             }
-            entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build());
+            entries[i] = new Entry(snapshotId, state, Collections.unmodifiableList(indexBuilder), builder.build());
         }
         return new RestoreInProgress(entries);
     }

@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.cluster.ClusterState.Custom;
 import org.elasticsearch.cluster.metadata.SnapshotId;

@@ -31,7 +30,10 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;

@@ -67,11 +69,11 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
         private final SnapshotId snapshotId;
         private final boolean includeGlobalState;
         private final ImmutableMap<ShardId, ShardSnapshotStatus> shards;
-        private final ImmutableList<String> indices;
-        private final ImmutableMap<String, ImmutableList<ShardId>> waitingIndices;
+        private final List<String> indices;
+        private final ImmutableMap<String, List<ShardId>> waitingIndices;
         private final long startTime;

-        public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, ImmutableList<String> indices, long startTime, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
+        public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List<String> indices, long startTime, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
             this.state = state;
             this.snapshotId = snapshotId;
             this.includeGlobalState = includeGlobalState;

@@ -106,11 +108,11 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
             return state;
         }

-        public ImmutableList<String> indices() {
+        public List<String> indices() {
             return indices;
         }

-        public ImmutableMap<String, ImmutableList<ShardId>> waitingIndices() {
+        public ImmutableMap<String, List<ShardId>> waitingIndices() {
             return waitingIndices;
         }


@@ -152,22 +154,22 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
             return result;
         }

-        private ImmutableMap<String, ImmutableList<ShardId>> findWaitingIndices(ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
-            Map<String, ImmutableList.Builder<ShardId>> waitingIndicesMap = newHashMap();
+        private ImmutableMap<String, List<ShardId>> findWaitingIndices(ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
+            Map<String, List<ShardId>> waitingIndicesMap = newHashMap();
             for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> entry : shards.entrySet()) {
                 if (entry.getValue().state() == State.WAITING) {
-                    ImmutableList.Builder<ShardId> waitingShards = waitingIndicesMap.get(entry.getKey().getIndex());
+                    List<ShardId> waitingShards = waitingIndicesMap.get(entry.getKey().getIndex());
                     if (waitingShards == null) {
-                        waitingShards = ImmutableList.builder();
+                        waitingShards = new ArrayList<>();
                         waitingIndicesMap.put(entry.getKey().getIndex(), waitingShards);
                     }
                     waitingShards.add(entry.getKey());
                 }
             }
             if (!waitingIndicesMap.isEmpty()) {
-                ImmutableMap.Builder<String, ImmutableList<ShardId>> waitingIndicesBuilder = ImmutableMap.builder();
-                for (Map.Entry<String, ImmutableList.Builder<ShardId>> entry : waitingIndicesMap.entrySet()) {
-                    waitingIndicesBuilder.put(entry.getKey(), entry.getValue().build());
+                ImmutableMap.Builder<String, List<ShardId>> waitingIndicesBuilder = ImmutableMap.builder();
+                for (Map.Entry<String, List<ShardId>> entry : waitingIndicesMap.entrySet()) {
+                    waitingIndicesBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue()));
                 }
                 return waitingIndicesBuilder.build();
             } else {

@@ -324,15 +326,15 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
         }
     }

-    private final ImmutableList<Entry> entries;
+    private final List<Entry> entries;


-    public SnapshotsInProgress(ImmutableList<Entry> entries) {
+    public SnapshotsInProgress(List<Entry> entries) {
         this.entries = entries;
     }

     public SnapshotsInProgress(Entry... entries) {
-        this.entries = ImmutableList.copyOf(entries);
+        this.entries = Arrays.asList(entries);
     }

     public List<Entry> entries() {

@@ -361,7 +363,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
             boolean includeGlobalState = in.readBoolean();
             State state = State.fromValue(in.readByte());
             int indices = in.readVInt();
-            ImmutableList.Builder<String> indexBuilder = ImmutableList.builder();
+            List<String> indexBuilder = new ArrayList<>();
             for (int j = 0; j < indices; j++) {
                 indexBuilder.add(in.readString());
             }

@@ -374,7 +376,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
                 State shardState = State.fromValue(in.readByte());
                 builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
             }
-            entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build());
+            entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
         }
         return new SnapshotsInProgress(entries);
     }

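A self-contained sketch of findWaitingIndices' grouping step after this refactoring: accumulate into plain ArrayLists keyed by index, then freeze each bucket before exposing the map (the data here is illustrative):

----------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupShards {
    public static void main(String[] args) {
        String[][] waitingShards = { {"logs", "0"}, {"logs", "1"}, {"metrics", "0"} };

        Map<String, List<String>> byIndex = new HashMap<>();
        for (String[] shard : waitingShards) {
            List<String> bucket = byIndex.get(shard[0]);
            if (bucket == null) {
                bucket = new ArrayList<>();
                byIndex.put(shard[0], bucket);
            }
            bucket.add(shard[1]);
        }
        // freeze each bucket before handing the map out
        for (Map.Entry<String, List<String>> e : byIndex.entrySet()) {
            e.setValue(Collections.unmodifiableList(e.getValue()));
        }
        System.out.println(byIndex); // {logs=[0, 1], metrics=[0]}
    }
}
----------------------------------------------------------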
@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.metadata;

 import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;

@@ -57,13 +56,13 @@ import static com.google.common.collect.Maps.newHashMap;

 public class IndexNameExpressionResolver extends AbstractComponent {

-    private final ImmutableList<ExpressionResolver> expressionResolvers;
+    private final List<ExpressionResolver> expressionResolvers;
     private final DateMathExpressionResolver dateMathExpressionResolver;

     @Inject
     public IndexNameExpressionResolver(Settings settings) {
         super(settings);
-        expressionResolvers = ImmutableList.of(
+        expressionResolvers = Arrays.asList(
                 dateMathExpressionResolver = new DateMathExpressionResolver(settings),
                 new WildcardExpressionResolver()
         );

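A note on the Arrays.asList(...) substitutions throughout this commit: the returned list is fixed-size (it is backed by the array). Reads and iteration behave like the old ImmutableList, but structural modification throws, and element replacement via set() is still possible, unlike with Guava. A short demonstration:

----------------------------------------------------------
import java.util.Arrays;
import java.util.List;

public class AsListCaveat {
    public static void main(String[] args) {
        List<String> resolvers = Arrays.asList("dateMath", "wildcard");
        System.out.println(resolvers.get(0));   // fine
        try {
            resolvers.add("another");           // fixed-size: not allowed
        } catch (UnsupportedOperationException expected) {
            System.out.println("add() rejected, as with the old immutable list");
        }
        // resolvers.set(0, "x") would succeed, unlike Guava's ImmutableList
    }
}
----------------------------------------------------------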
@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.UnmodifiableIterator;
 import org.apache.lucene.util.CollectionUtil;

@@ -253,7 +252,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
      * @param concreteIndices The concrete indexes the index aliases must point to order to be returned.
      * @return the found index aliases grouped by index
      */
-    public ImmutableOpenMap<String, ImmutableList<AliasMetaData>> findAliases(final String[] aliases, String[] concreteIndices) {
+    public ImmutableOpenMap<String, List<AliasMetaData>> findAliases(final String[] aliases, String[] concreteIndices) {
         assert aliases != null;
         assert concreteIndices != null;
         if (concreteIndices.length == 0) {

@@ -261,7 +260,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         }

         boolean matchAllAliases = matchAllAliases(aliases);
-        ImmutableOpenMap.Builder<String, ImmutableList<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, List<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder();
         Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
         for (String index : intersection) {
             IndexMetaData indexMetaData = indices.get(index);

@@ -281,7 +280,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                     return o1.alias().compareTo(o2.alias());
                 }
             });
-            mapBuilder.put(index, ImmutableList.copyOf(filteredValues));
+            mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
             }
         }
         return mapBuilder.build();

@@ -364,7 +363,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         return indexMapBuilder.build();
     }

-    public ImmutableOpenMap<String, ImmutableList<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
+    public ImmutableOpenMap<String, List<IndexWarmersMetaData.Entry>> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) {
         assert uncheckedWarmers != null;
         assert concreteIndices != null;
         if (concreteIndices.length == 0) {

@@ -373,7 +372,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         // special _all check to behave the same like not specifying anything for the warmers (not for the indices)
         final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers;

-        ImmutableOpenMap.Builder<String, ImmutableList<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, List<IndexWarmersMetaData.Entry>> mapBuilder = ImmutableOpenMap.builder();
         Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
         for (String index : intersection) {
             IndexMetaData indexMetaData = indices.get(index);

@@ -382,6 +381,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
                 continue;
             }

+            // TODO: make this a List so we don't have to copy below
             Collection<IndexWarmersMetaData.Entry> filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate<IndexWarmersMetaData.Entry>() {

                 @Override

@@ -399,7 +399,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr

             });
             if (!filteredWarmers.isEmpty()) {
-                mapBuilder.put(index, ImmutableList.copyOf(filteredWarmers));
+                mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers)));
             }
         }
         return mapBuilder.build();

core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster.metadata;

-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.metadata.MetaData.Custom;
@@ -33,9 +32,9 @@ import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;

 /**
  * Contains metadata about registered snapshot repositories
@@ -46,7 +45,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me

     public static final RepositoriesMetaData PROTO = new RepositoriesMetaData();

-    private final ImmutableList<RepositoryMetaData> repositories;
+    private final List<RepositoryMetaData> repositories;

     /**
      * Constructs new repository metadata
@@ -54,7 +53,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
      * @param repositories list of repositories
      */
     public RepositoriesMetaData(RepositoryMetaData... repositories) {
-        this.repositories = ImmutableList.copyOf(repositories);
+        this.repositories = Arrays.asList(repositories);
     }

     /**
@@ -62,7 +61,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
      *
      * @return list of repositories
      */
-    public ImmutableList<RepositoryMetaData> repositories() {
+    public List<RepositoryMetaData> repositories() {
         return this.repositories;
     }
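Unlike `ImmutableList.copyOf`, `Arrays.asList` returns a fixed-size list backed directly by the varargs array rather than a copy. That is acceptable here because the array never escapes the constructor, but the semantics are worth keeping in mind. A small sketch (all names illustrative):

--------------------------------------------------
import java.util.Arrays;
import java.util.List;

public class ArraysAsListDemo {
    public static void main(String[] args) {
        String[] repos = {"backup", "archive"};
        List<String> list = Arrays.asList(repos);

        // Fixed-size: structural changes fail fast...
        try {
            list.add("snapshots");
        } catch (UnsupportedOperationException e) {
            System.out.println("add() is rejected");
        }
        // ...but the list is still a live view of the array.
        repos[0] = "renamed";
        System.out.println(list.get(0)); // renamed
    }
}
--------------------------------------------------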
core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster.node;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;

 import org.elasticsearch.Version;
@@ -35,6 +34,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;

 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;
@@ -92,7 +93,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
         return Booleans.isExplicitTrue(data);
     }

-    public static final ImmutableList<DiscoveryNode> EMPTY_LIST = ImmutableList.of();
+    public static final List<DiscoveryNode> EMPTY_LIST = Collections.emptyList();

     private String nodeName = "";
     private String nodeId;
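`Collections.emptyList()` is the JDK counterpart of `ImmutableList.of()`: it hands out one shared, immutable instance that generic inference adapts to any element type, so constants like `EMPTY_LIST` cost nothing per use. A sketch (names mine):

--------------------------------------------------
import java.util.Collections;
import java.util.List;

public class EmptyListDemo {
    // The JDK ships a single immutable instance behind emptyList().
    static final List<String> EMPTY_LIST = Collections.emptyList();

    public static void main(String[] args) {
        List<Integer> alsoEmpty = Collections.emptyList();
        System.out.println(EMPTY_LIST.isEmpty() && alsoEmpty.isEmpty()); // true
        System.out.println(EMPTY_LIST == Collections.EMPTY_LIST);        // true: shared singleton
    }
}
--------------------------------------------------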
core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
@@ -22,7 +22,6 @@ package org.elasticsearch.cluster.node;
 import com.carrotsearch.hppc.ObjectHashSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.UnmodifiableIterator;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
@@ -36,6 +35,7 @@ import org.elasticsearch.common.transport.TransportAddress;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -433,7 +433,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
                 newMasterNode = masterNode();
             }
         }
-        return new Delta(previousMasterNode, newMasterNode, localNodeId, ImmutableList.copyOf(removed), ImmutableList.copyOf(added));
+        return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added));
     }

     @Override
@@ -472,14 +472,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
         private final String localNodeId;
         private final DiscoveryNode previousMasterNode;
         private final DiscoveryNode newMasterNode;
-        private final ImmutableList<DiscoveryNode> removed;
-        private final ImmutableList<DiscoveryNode> added;
+        private final List<DiscoveryNode> removed;
+        private final List<DiscoveryNode> added;

-        public Delta(String localNodeId, ImmutableList<DiscoveryNode> removed, ImmutableList<DiscoveryNode> added) {
+        public Delta(String localNodeId, List<DiscoveryNode> removed, List<DiscoveryNode> added) {
             this(null, null, localNodeId, removed, added);
         }

-        public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, ImmutableList<DiscoveryNode> removed, ImmutableList<DiscoveryNode> added) {
+        public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, List<DiscoveryNode> removed, List<DiscoveryNode> added) {
             this.previousMasterNode = previousMasterNode;
             this.newMasterNode = newMasterNode;
             this.localNodeId = localNodeId;
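The `Delta` change wraps the caller-built lists with `Collections.unmodifiableList` instead of copying them. This is an ownership-transfer idiom: it is only safe because the caller hands over the freshly built `ArrayList` and never mutates it afterwards. The idiom in miniature (hypothetical names, not the real classes):

--------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class DeltaIdiom {
    private final List<String> removed;

    DeltaIdiom(List<String> removedNodes) {
        // Safe as long as the caller hands over ownership of the list:
        // no copy is taken, only a read-only wrapper.
        this.removed = Collections.unmodifiableList(removedNodes);
    }

    List<String> removed() {
        return removed;
    }

    public static void main(String[] args) {
        List<String> scratch = new ArrayList<>();
        scratch.add("node-1");
        DeltaIdiom delta = new DeltaIdiom(scratch);
        System.out.println(delta.removed()); // [node-1]
    }
}
--------------------------------------------------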
core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
@@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing;
 import com.carrotsearch.hppc.IntSet;
 import com.carrotsearch.hppc.cursors.IntCursor;
 import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;
 import com.google.common.collect.UnmodifiableIterator;
 import org.apache.lucene.util.CollectionUtil;
@@ -36,6 +35,7 @@ import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Set;
@@ -73,7 +73,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
         this.index = index;
         this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
         this.shards = shards;
-        ImmutableList.Builder<ShardRouting> allActiveShards = ImmutableList.builder();
+        List<ShardRouting> allActiveShards = new ArrayList<>();
         for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
             for (ShardRouting shardRouting : cursor.value) {
                 shardRouting.freeze();
@@ -82,7 +82,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
                 }
             }
         }
-        this.allActiveShards = allActiveShards.build();
+        this.allActiveShards = Collections.unmodifiableList(allActiveShards);
     }

     /**
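Without `ImmutableList.Builder`, the accumulate-then-seal pattern becomes: fill a plain `ArrayList`, then publish it exactly once through `Collections.unmodifiableList`. A compact sketch (strings stand in for `ShardRouting`):

--------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class BuilderReplacement {
    public static void main(String[] args) {
        // Guava style: b = ImmutableList.builder(); b.add(x); field = b.build();
        // JDK style, as in the diff: accumulate, then seal once.
        List<String> allActiveShards = new ArrayList<>();
        allActiveShards.add("shard-0[P]");
        allActiveShards.add("shard-0[R]");
        List<String> sealed = Collections.unmodifiableList(allActiveShards);
        System.out.println(sealed); // [shard-0[P], shard-0[R]]
    }
}
--------------------------------------------------

The sealed list is only as immutable as the privacy of the backing `ArrayList`, so the local variable must not leak.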
core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster.routing;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Sets;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -32,6 +31,7 @@ import org.elasticsearch.index.shard.ShardId;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -52,30 +52,30 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
     final ShardId shardId;

     final ShardRouting primary;
-    final ImmutableList<ShardRouting> primaryAsList;
-    final ImmutableList<ShardRouting> replicas;
-    final ImmutableList<ShardRouting> shards;
-    final ImmutableList<ShardRouting> activeShards;
-    final ImmutableList<ShardRouting> assignedShards;
-    final static ImmutableList<ShardRouting> NO_SHARDS = ImmutableList.of();
+    final List<ShardRouting> primaryAsList;
+    final List<ShardRouting> replicas;
+    final List<ShardRouting> shards;
+    final List<ShardRouting> activeShards;
+    final List<ShardRouting> assignedShards;
+    final static List<ShardRouting> NO_SHARDS = Collections.emptyList();
     final boolean allShardsStarted;

     /**
      * The initializing list, including ones that are initializing on a target node because of relocation.
      * If we can come up with a better variable name, it would be nice...
      */
-    final ImmutableList<ShardRouting> allInitializingShards;
+    final List<ShardRouting> allInitializingShards;

     IndexShardRoutingTable(ShardId shardId, List<ShardRouting> shards) {
         this.shardId = shardId;
         this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
-        this.shards = ImmutableList.copyOf(shards);
+        this.shards = Collections.unmodifiableList(shards);

         ShardRouting primary = null;
-        ImmutableList.Builder<ShardRouting> replicas = ImmutableList.builder();
-        ImmutableList.Builder<ShardRouting> activeShards = ImmutableList.builder();
-        ImmutableList.Builder<ShardRouting> assignedShards = ImmutableList.builder();
-        ImmutableList.Builder<ShardRouting> allInitializingShards = ImmutableList.builder();
+        List<ShardRouting> replicas = new ArrayList<>();
+        List<ShardRouting> activeShards = new ArrayList<>();
+        List<ShardRouting> assignedShards = new ArrayList<>();
+        List<ShardRouting> allInitializingShards = new ArrayList<>();
         boolean allShardsStarted = true;
         for (ShardRouting shard : shards) {
             if (shard.primary()) {
@@ -104,14 +104,14 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

         this.primary = primary;
         if (primary != null) {
-            this.primaryAsList = ImmutableList.of(primary);
+            this.primaryAsList = Collections.singletonList(primary);
         } else {
-            this.primaryAsList = ImmutableList.of();
+            this.primaryAsList = Collections.emptyList();
         }
-        this.replicas = replicas.build();
-        this.activeShards = activeShards.build();
-        this.assignedShards = assignedShards.build();
-        this.allInitializingShards = allInitializingShards.build();
+        this.replicas = Collections.unmodifiableList(replicas);
+        this.activeShards = Collections.unmodifiableList(activeShards);
+        this.assignedShards = Collections.unmodifiableList(assignedShards);
+        this.allInitializingShards = Collections.unmodifiableList(allInitializingShards);
     }

     /**
@@ -145,7 +145,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
                 shardRoutings.add(new ShardRouting(shards.get(i), highestVersion));
             }
         }
-        return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shardRoutings));
+        return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shardRoutings));
     }

     /**
@@ -468,11 +468,11 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

     static class AttributesRoutings {

-        public final ImmutableList<ShardRouting> withSameAttribute;
-        public final ImmutableList<ShardRouting> withoutSameAttribute;
+        public final List<ShardRouting> withSameAttribute;
+        public final List<ShardRouting> withoutSameAttribute;
         public final int totalSize;

-        AttributesRoutings(ImmutableList<ShardRouting> withSameAttribute, ImmutableList<ShardRouting> withoutSameAttribute) {
+        AttributesRoutings(List<ShardRouting> withSameAttribute, List<ShardRouting> withoutSameAttribute) {
             this.withSameAttribute = withSameAttribute;
             this.withoutSameAttribute = withoutSameAttribute;
             this.totalSize = withoutSameAttribute.size() + withSameAttribute.size();
@@ -488,9 +488,9 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
         if (shardRoutings == null) {
             synchronized (shardsByAttributeMutex) {
                 ArrayList<ShardRouting> from = new ArrayList<>(activeShards);
-                ImmutableList<ShardRouting> to = collectAttributeShards(key, nodes, from);
+                List<ShardRouting> to = collectAttributeShards(key, nodes, from);

-                shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from));
+                shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from));
                 activeShardsByAttributes = MapBuilder.newMapBuilder(activeShardsByAttributes).put(key, shardRoutings).immutableMap();
             }
         }
@@ -502,15 +502,15 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
         if (shardRoutings == null) {
             synchronized (shardsByAttributeMutex) {
                 ArrayList<ShardRouting> from = new ArrayList<>(allInitializingShards);
-                ImmutableList<ShardRouting> to = collectAttributeShards(key, nodes, from);
-                shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from));
+                List<ShardRouting> to = collectAttributeShards(key, nodes, from);
+                shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from));
                 initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap();
             }
         }
         return shardRoutings;
     }

-    private static ImmutableList<ShardRouting> collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {
+    private static List<ShardRouting> collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {
         final ArrayList<ShardRouting> to = new ArrayList<>();
         for (final String attribute : key.attributes) {
             final String localAttributeValue = nodes.localNode().attributes().get(attribute);
@@ -527,7 +527,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
                 }
             }
         }
-        return ImmutableList.copyOf(to);
+        return Collections.unmodifiableList(to);
     }

     public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes) {
@@ -616,7 +616,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
         }

         public IndexShardRoutingTable build() {
-            return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shards));
+            return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(new ArrayList<>(shards)));
         }

         public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException {
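Note how the nullable primary is handled: `Collections.singletonList` covers the one-element case that `ImmutableList.of(primary)` covered, and `Collections.emptyList()` the zero-element case; both are cheap immutable lists. The pattern with simplified types (strings instead of `ShardRouting`):

--------------------------------------------------
import java.util.Collections;
import java.util.List;

public class PrimaryAsListSketch {
    // Stand-in for ShardRouting; the real class carries much more state.
    static List<String> primaryAsList(String primary) {
        return primary != null
                ? Collections.singletonList(primary)
                : Collections.<String>emptyList();
    }

    public static void main(String[] args) {
        System.out.println(primaryAsList("shard-0[P]")); // [shard-0[P]]
        System.out.println(primaryAsList(null));         // []
    }
}
--------------------------------------------------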
core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster.routing;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -27,6 +26,7 @@ import org.elasticsearch.common.io.stream.Streamable;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;

@@ -53,7 +53,7 @@ public class RoutingTableValidation implements Streamable {

     public List<String> allFailures() {
         if (failures().isEmpty() && indicesFailures().isEmpty()) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         List<String> allFailures = new ArrayList<>(failures());
         for (Map.Entry<String, List<String>> entry : indicesFailures().entrySet()) {
@@ -66,7 +66,7 @@ public class RoutingTableValidation implements Streamable {

     public List<String> failures() {
         if (failures == null) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         return failures;
     }
@@ -80,11 +80,11 @@ public class RoutingTableValidation implements Streamable {

     public List<String> indexFailures(String index) {
         if (indicesFailures == null) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         List<String> indexFailures = indicesFailures.get(index);
         if (indexFailures == null) {
-            return ImmutableList.of();
+            return Collections.emptyList();
         }
         return indexFailures;
     }
@@ -120,7 +120,7 @@ public class RoutingTableValidation implements Streamable {
         valid = in.readBoolean();
         int size = in.readVInt();
         if (size == 0) {
-            failures = ImmutableList.of();
+            failures = Collections.emptyList();
         } else {
            failures = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
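All of these swaps sit at `return` statements or assignments, where the compiler infers the element type of `Collections.emptyList()` from the declared target type, so no explicit type witness is needed. A sketch of that inference (hypothetical method):

--------------------------------------------------
import java.util.Collections;
import java.util.List;

public class ReturnInference {
    private static List<String> failures = null;

    static List<String> failures() {
        if (failures == null) {
            // Inferred as List<String> from the method's return type.
            return Collections.emptyList();
        }
        return failures;
    }

    public static void main(String[] args) {
        System.out.println(failures().size()); // 0
    }
}
--------------------------------------------------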
core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.routing.allocation;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -40,6 +39,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;

 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;


@@ -89,7 +89,7 @@ public class AllocationService extends AbstractComponent {
     }

     public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
-        return applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
+        return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
     }

     /**
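`applyFailedShard` stays a thin convenience wrapper: it adapts the single-shard call to the batch `applyFailedShards` API with a one-element list, now built with `Collections.singletonList`. The delegation shape, reduced to plain strings (all names illustrative):

--------------------------------------------------
import java.util.Collections;
import java.util.List;

public class SingleToBatch {
    static String applyFailedShards(List<String> failedShards) {
        return "rerouted after " + failedShards.size() + " failure(s)";
    }

    // The single-item overload delegates to the batch method.
    static String applyFailedShard(String failedShard) {
        return applyFailedShards(Collections.singletonList(failedShard));
    }

    public static void main(String[] args) {
        System.out.println(applyFailedShard("shard-3[R]"));
    }
}
--------------------------------------------------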
core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -335,15 +335,16 @@ public class DiskThresholdDecider extends AllocationDecider {

     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        final Decision decision = earlyTerminate(allocation);
+        ClusterInfo clusterInfo = allocation.clusterInfo();
+        Map<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();
+        final Decision decision = earlyTerminate(allocation, usages);
         if (decision != null) {
             return decision;
         }

         final double usedDiskThresholdLow = 100.0 - DiskThresholdDecider.this.freeDiskThresholdLow;
         final double usedDiskThresholdHigh = 100.0 - DiskThresholdDecider.this.freeDiskThresholdHigh;
-        ClusterInfo clusterInfo = allocation.clusterInfo();
-        Map<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();

         DiskUsage usage = getDiskUsage(node, allocation, usages);
         // First, check that the node currently over the low watermark
         double freeDiskPercentage = usage.getFreeDiskAsPercentage();
@@ -449,12 +450,13 @@ public class DiskThresholdDecider extends AllocationDecider {
         if (shardRouting.currentNodeId().equals(node.nodeId()) == false) {
             throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]");
         }
-        final Decision decision = earlyTerminate(allocation);
+        final ClusterInfo clusterInfo = allocation.clusterInfo();
+        final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
+        final Decision decision = earlyTerminate(allocation, usages);
         if (decision != null) {
             return decision;
         }
-        final ClusterInfo clusterInfo = allocation.clusterInfo();
-        final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();

         final DiskUsage usage = getDiskUsage(node, allocation, usages);
         final String dataPath = clusterInfo.getDataPath(shardRouting);
         // If this node is already above the high threshold, the shard cannot remain (get it off!)
@@ -590,7 +592,7 @@ public class DiskThresholdDecider extends AllocationDecider {
         }
     }

-    private Decision earlyTerminate(RoutingAllocation allocation) {
+    private Decision earlyTerminate(RoutingAllocation allocation, final Map<String, DiskUsage> usages) {
         // Always allow allocation if the decider is disabled
         if (!enabled) {
             return allocation.decision(Decision.YES, NAME, "disk threshold decider disabled");
@@ -613,7 +615,6 @@ public class DiskThresholdDecider extends AllocationDecider {
             return allocation.decision(Decision.YES, NAME, "cluster info unavailable");
         }

-        final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
         // Fail open if there are no disk usages available
         if (usages.isEmpty()) {
             if (logger.isTraceEnabled()) {
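These `DiskThresholdDecider` hunks change a signature rather than a collection type: `earlyTerminate` no longer looks up a usage map itself, each caller now passes the map it actually decides on (most-available usages in `canAllocate`, least-available in `canRemain`). A stripped-down sketch of that parameterization (names and types simplified, not the real decider API):

--------------------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class EarlyTerminateSketch {
    // The caller chooses which usage map the guard checks; the guard
    // returns a decision string to short-circuit, or null to continue.
    static String earlyTerminate(Map<String, Double> usages) {
        if (usages.isEmpty()) {
            return "YES: no disk usage data, fail open";
        }
        return null; // no early decision, run the real watermark checks
    }

    public static void main(String[] args) {
        System.out.println(earlyTerminate(Collections.<String, Double>emptyMap()));
        Map<String, Double> usages = new HashMap<>();
        usages.put("node-1", 42.0);
        System.out.println(earlyTerminate(usages)); // null
    }
}
--------------------------------------------------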
core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java
@@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common;

import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;

import org.elasticsearch.common.collect.ImmutableOpenMap;

import java.util.Set;

public class DelegatingHasContextAndHeaders implements HasContextAndHeaders {

    private HasContextAndHeaders delegate;

    public DelegatingHasContextAndHeaders(HasContextAndHeaders delegate) {
        this.delegate = delegate;
    }

    @Override
    public <V> void putHeader(String key, V value) {
        delegate.putHeader(key, value);
    }

    @Override
    public void copyContextAndHeadersFrom(HasContextAndHeaders other) {
        delegate.copyContextAndHeadersFrom(other);
    }

    @Override
    public <V> V getHeader(String key) {
        return delegate.getHeader(key);
    }

    @Override
    public boolean hasHeader(String key) {
        return delegate.hasHeader(key);
    }

    @Override
    public <V> V putInContext(Object key, Object value) {
        return delegate.putInContext(key, value);
    }

    @Override
    public Set<String> getHeaders() {
        return delegate.getHeaders();
    }

    @Override
    public void copyHeadersFrom(HasHeaders from) {
        delegate.copyHeadersFrom(from);
    }

    @Override
    public void putAllInContext(ObjectObjectAssociativeContainer<Object, Object> map) {
        delegate.putAllInContext(map);
    }

    @Override
    public <V> V getFromContext(Object key) {
        return delegate.getFromContext(key);
    }

    @Override
    public <V> V getFromContext(Object key, V defaultValue) {
        return delegate.getFromContext(key, defaultValue);
    }

    @Override
    public boolean hasInContext(Object key) {
        return delegate.hasInContext(key);
    }

    @Override
    public int contextSize() {
        return delegate.contextSize();
    }

    @Override
    public boolean isContextEmpty() {
        return delegate.isContextEmpty();
    }

    @Override
    public ImmutableOpenMap<Object, Object> getContext() {
        return delegate.getContext();
    }

    @Override
    public void copyContextFrom(HasContext other) {
        delegate.copyContextFrom(other);
    }

}
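`DelegatingHasContextAndHeaders` is a plain forwarding wrapper: it implements the interface by delegating every call, so subclasses can attach themselves to an existing context/headers carrier and override only what they need. The pattern in miniature (the two-method interface below is a stand-in, not the real `HasContextAndHeaders`):

--------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public class DelegationSketch {
    // Simplified stand-in for HasContextAndHeaders (illustrative only).
    interface HasHeaders {
        void putHeader(String key, String value);
        String getHeader(String key);
    }

    static class MapHeaders implements HasHeaders {
        private final Map<String, String> headers = new HashMap<>();
        public void putHeader(String key, String value) { headers.put(key, value); }
        public String getHeader(String key) { return headers.get(key); }
    }

    // Same shape as DelegatingHasContextAndHeaders: hold a delegate,
    // forward everything, let subclasses intercept selectively.
    static class Delegating implements HasHeaders {
        private final HasHeaders delegate;
        Delegating(HasHeaders delegate) { this.delegate = delegate; }
        public void putHeader(String key, String value) { delegate.putHeader(key, value); }
        public String getHeader(String key) { return delegate.getHeader(key); }
    }

    public static void main(String[] args) {
        HasHeaders wrapped = new Delegating(new MapHeaders());
        wrapped.putHeader("X-Opaque-Id", "req-7");
        System.out.println(wrapped.getHeader("X-Opaque-Id"));
    }
}
--------------------------------------------------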
core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java
@@ -19,26 +19,28 @@

 package org.elasticsearch.common.blobstore;

-import com.google.common.collect.ImmutableList;

+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;

 /**
  *
  */
 public class BlobPath implements Iterable<String> {

-    private final ImmutableList<String> paths;
+    private final List<String> paths;

     public BlobPath() {
-        this.paths = ImmutableList.of();
+        this.paths = Collections.emptyList();
     }

     public static BlobPath cleanPath() {
         return new BlobPath();
     }

-    private BlobPath(ImmutableList<String> paths) {
+    private BlobPath(List<String> paths) {
         this.paths = paths;
     }

@@ -52,8 +54,10 @@ public class BlobPath implements Iterable<String> {
     }

     public BlobPath add(String path) {
-        ImmutableList.Builder<String> builder = ImmutableList.builder();
-        return new BlobPath(builder.addAll(paths).add(path).build());
+        List<String> paths = new ArrayList<>();
+        paths.addAll(this.paths);
+        paths.add(path);
+        return new BlobPath(Collections.unmodifiableList(paths));
     }

     public String buildAsString(String separator) {
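`BlobPath.add` keeps its copy-on-write contract after the rewrite: it builds a new list from the old segments plus the new one and returns a new `BlobPath`, leaving the original untouched. The same persistent-append shape, reduced to a sketch (`PathSketch` is hypothetical):

--------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public final class PathSketch {
    private final List<String> paths;

    private PathSketch(List<String> paths) { this.paths = paths; }

    static PathSketch empty() { return new PathSketch(Collections.<String>emptyList()); }

    // Copy-plus-one append: the receiving instance is never mutated.
    PathSketch add(String segment) {
        List<String> next = new ArrayList<>(paths);
        next.add(segment);
        return new PathSketch(Collections.unmodifiableList(next));
    }

    @Override public String toString() { return String.join("/", paths); }

    public static void main(String[] args) {
        PathSketch base = empty().add("indices").add("0");
        PathSketch deeper = base.add("snapshot");
        System.out.println(base);   // indices/0
        System.out.println(deeper); // indices/0/snapshot
    }
}
--------------------------------------------------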
core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java
@@ -16,13 +16,13 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.spi.InjectionListener;
 import org.elasticsearch.common.inject.spi.Message;
 import org.elasticsearch.common.inject.spi.TypeEncounter;

 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;

 import static com.google.common.base.Preconditions.checkState;
@@ -47,16 +47,16 @@ final class EncounterImpl<T> implements TypeEncounter<T> {
         valid = false;
     }

-    public ImmutableList<MembersInjector<? super T>> getMembersInjectors() {
+    public List<MembersInjector<? super T>> getMembersInjectors() {
         return membersInjectors == null
-                ? ImmutableList.<MembersInjector<? super T>>of()
-                : ImmutableList.copyOf(membersInjectors);
+                ? Collections.<MembersInjector<? super T>>emptyList()
+                : Collections.unmodifiableList(membersInjectors);
     }

-    public ImmutableList<InjectionListener<? super T>> getInjectionListeners() {
+    public List<InjectionListener<? super T>> getInjectionListeners() {
         return injectionListeners == null
-                ? ImmutableList.<InjectionListener<? super T>>of()
-                : ImmutableList.copyOf(injectionListeners);
+                ? Collections.<InjectionListener<? super T>>emptyList()
+                : Collections.unmodifiableList(injectionListeners);
     }

     @Override
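Here the explicit type witnesses survive the migration: conditional expressions do not always propagate the target type to their branches (Java 7 inference rules in particular), so `Collections.<MembersInjector<? super T>>emptyList()` mirrors the old `ImmutableList.<...>of()`. A sketch of the same witness pattern (hypothetical helper):

--------------------------------------------------
import java.util.Collections;
import java.util.List;

public class GenericWitness {
    // In a conditional expression the target type may not drive inference,
    // so the element type is spelled out on the empty-list branch.
    static <T> List<T> orEmpty(List<T> values) {
        return values == null
                ? Collections.<T>emptyList()
                : Collections.unmodifiableList(values);
    }

    public static void main(String[] args) {
        System.out.println(orEmpty(null).size()); // 0
    }
}
--------------------------------------------------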
core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java
@@ -16,7 +16,6 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.internal.ErrorsException;
 import org.elasticsearch.common.inject.internal.InternalContext;
@@ -85,7 +84,7 @@ class InjectionRequestProcessor extends AbstractProcessor {
         final InjectorImpl injector;
         final Object source;
         final StaticInjectionRequest request;
-        ImmutableList<SingleMemberInjector> memberInjectors;
+        List<SingleMemberInjector> memberInjectors;

         public StaticInjection(InjectorImpl injector, StaticInjectionRequest request) {
             this.injector = injector;
core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java
@@ -16,7 +16,6 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import org.elasticsearch.common.Classes;
@@ -49,6 +48,7 @@ import java.lang.reflect.Modifier;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -164,7 +164,7 @@ class InjectorImpl implements Injector, Lookups {

     @Override
     public Injector createChildInjector(Module... modules) {
-        return createChildInjector(ImmutableList.copyOf(modules));
+        return createChildInjector(Arrays.asList(modules));
     }

     /**
@@ -716,7 +716,7 @@ class InjectorImpl implements Injector, Lookups {
         List<Binding<?>> bindings = multimap.get(type);
         return bindings != null
                 ? Collections.<Binding<T>>unmodifiableList((List) multimap.get(type))
-                : ImmutableList.<Binding<T>>of();
+                : Collections.<Binding<T>>emptyList();
     }
 }

core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java
@@ -16,7 +16,6 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.internal.ErrorsException;
@@ -24,6 +23,8 @@ import org.elasticsearch.common.inject.internal.InternalContext;
 import org.elasticsearch.common.inject.spi.InjectionListener;
 import org.elasticsearch.common.inject.spi.InjectionPoint;

+import java.util.List;
+
 /**
  * Injects members of instances of a given type.
  *
@@ -32,12 +33,12 @@ import org.elasticsearch.common.inject.spi.InjectionPoint;
 class MembersInjectorImpl<T> implements MembersInjector<T> {
     private final TypeLiteral<T> typeLiteral;
     private final InjectorImpl injector;
-    private final ImmutableList<SingleMemberInjector> memberInjectors;
-    private final ImmutableList<MembersInjector<? super T>> userMembersInjectors;
-    private final ImmutableList<InjectionListener<? super T>> injectionListeners;
+    private final List<SingleMemberInjector> memberInjectors;
+    private final List<MembersInjector<? super T>> userMembersInjectors;
+    private final List<InjectionListener<? super T>> injectionListeners;

     MembersInjectorImpl(InjectorImpl injector, TypeLiteral<T> typeLiteral,
-                        EncounterImpl<T> encounter, ImmutableList<SingleMemberInjector> memberInjectors) {
+                        EncounterImpl<T> encounter, List<SingleMemberInjector> memberInjectors) {
         this.injector = injector;
         this.typeLiteral = typeLiteral;
         this.memberInjectors = memberInjectors;
@@ -45,7 +46,7 @@ class MembersInjectorImpl<T> implements MembersInjector<T> {
         this.injectionListeners = encounter.getInjectionListeners();
     }

-    public ImmutableList<SingleMemberInjector> getMemberInjectors() {
+    public List<SingleMemberInjector> getMemberInjectors() {
         return memberInjectors;
     }

core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java
@@ -16,7 +16,6 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.internal.ErrorsException;
 import org.elasticsearch.common.inject.internal.FailableCache;
@@ -25,6 +24,7 @@ import org.elasticsearch.common.inject.spi.TypeListenerBinding;

 import java.lang.reflect.Field;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Set;

@@ -35,7 +35,7 @@ import java.util.Set;
  */
 class MembersInjectorStore {
     private final InjectorImpl injector;
-    private final ImmutableList<TypeListenerBinding> typeListenerBindings;
+    private final List<TypeListenerBinding> typeListenerBindings;

     private final FailableCache<TypeLiteral<?>, MembersInjectorImpl<?>> cache
             = new FailableCache<TypeLiteral<?>, MembersInjectorImpl<?>>() {
@@ -49,7 +49,7 @@ class MembersInjectorStore {
     MembersInjectorStore(InjectorImpl injector,
                          List<TypeListenerBinding> typeListenerBindings) {
         this.injector = injector;
-        this.typeListenerBindings = ImmutableList.copyOf(typeListenerBindings);
+        this.typeListenerBindings = Collections.unmodifiableList(typeListenerBindings);
     }

     /**
@@ -82,7 +82,7 @@ class MembersInjectorStore {
             errors.merge(e.getErrorMessages());
             injectionPoints = e.getPartialValue();
         }
-        ImmutableList<SingleMemberInjector> injectors = getInjectors(injectionPoints, errors);
+        List<SingleMemberInjector> injectors = getInjectors(injectionPoints, errors);
         errors.throwIfNewErrors(numErrorsBefore);

         EncounterImpl<T> encounter = new EncounterImpl<>(errors, injector.lookups);
@@ -104,7 +104,7 @@ class MembersInjectorStore {
     /**
     * Returns the injectors for the specified injection points.
     */
-    ImmutableList<SingleMemberInjector> getInjectors(
+    List<SingleMemberInjector> getInjectors(
             Set<InjectionPoint> injectionPoints, Errors errors) {
         List<SingleMemberInjector> injectors = new ArrayList<>();
         for (InjectionPoint injectionPoint : injectionPoints) {
@@ -120,6 +120,6 @@ class MembersInjectorStore {
                 // ignored for now
             }
         }
-        return ImmutableList.copyOf(injectors);
+        return Collections.unmodifiableList(injectors);
     }
 }
core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java
@@ -16,12 +16,12 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.spi.Message;

 import java.util.Collection;
+import java.util.Collections;

 import static com.google.common.base.Preconditions.checkArgument;

@@ -47,7 +47,7 @@ public final class ProvisionException extends RuntimeException {

     public ProvisionException(String message, Throwable cause) {
         super(cause);
-        this.messages = ImmutableSet.of(new Message(ImmutableList.of(), message, cause));
+        this.messages = ImmutableSet.of(new Message(Collections.emptyList(), message, cause));
     }

     public ProvisionException(String message) {
core/src/main/java/org/elasticsearch/common/inject/State.java
@@ -16,7 +16,6 @@

 package org.elasticsearch.common.inject;

-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.elasticsearch.common.inject.internal.BindingImpl;
 import org.elasticsearch.common.inject.internal.Errors;
@@ -24,6 +23,7 @@ import org.elasticsearch.common.inject.internal.MatcherAndConverter;
 import org.elasticsearch.common.inject.spi.TypeListenerBinding;

 import java.lang.annotation.Annotation;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;

@@ -89,7 +89,7 @@ interface State {

     @Override
     public List<TypeListenerBinding> getTypeListenerBindings() {
-        return ImmutableList.of();
+        return Collections.emptyList();
     }

     @Override