Merge branch 'master' into feature/multi_cluster_search

commit 52b35bf0f8
@@ -286,6 +286,9 @@ class ClusterFormationTasks {
 esConfig['node.max_local_storage_nodes'] = node.config.numNodes
 esConfig['http.port'] = node.config.httpPort
 esConfig['transport.tcp.port'] = node.config.transportPort
+// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
+esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
+esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
 esConfig.putAll(node.config.settings)
 
 Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
@@ -1,5 +1,5 @@
 elasticsearch = 6.0.0-alpha1
-lucene = 6.3.0
+lucene = 6.4.0-snapshot-ec38570
 
 # optional dependencies
 spatial4j = 0.6
@@ -46,7 +46,7 @@ public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest,
 
 @Override
 protected void doExecute(BulkRequest request, ActionListener<BulkResponse> listener) {
-final int itemCount = request.subRequests().size();
+final int itemCount = request.requests().size();
 // simulate at least a realistic amount of data that gets serialized
 BulkItemResponse[] bulkItemResponses = new BulkItemResponse[itemCount];
 for (int idx = 0; idx < itemCount; idx++) {
@@ -1 +1 @@
 8b5057f74ea378c0150a1860874a3ebdcb713767
@@ -1 +1 @@
 3ccfb9b60f04d71add996a666ceb8902904fd805
@@ -1 +0,0 @@
-494aed699af238c3872a6b65e17939e9cb7ddbe0
@@ -0,0 +1 @@
+770114e0188dd8b4f30e5878b4f6c8677cecf1be
@@ -1 +0,0 @@
-77dede7dff1b833ca2e92d8ab137edb209354d9b
@@ -0,0 +1 @@
+f4eb0257e8419beaa9f84da6a51375fda4e491f2
@@ -1 +0,0 @@
-d3c87ea89e2f83e401f9cc7f14e4c43945f7f1e1
@@ -0,0 +1 @@
+c80ad16cd36c41012abb8a8bb1c7328c6d680b4a
@@ -1 +0,0 @@
-2c96d59e318ea66838aeb9c5cfb8b4d27b40953c
@@ -0,0 +1 @@
+070d4e370f4fe0b8a04b2bce5b4381201b0c783f
@@ -1 +0,0 @@
-4f154d8badfe47fe45503c18fb30f2177f758794
@@ -0,0 +1 @@
+131d9a86f5943675493a85def0e692842f396458
@@ -1 +0,0 @@
-79b898117dcfde2981ec6806e420ff218842eca8
@@ -0,0 +1 @@
+385b2202036b50a764e4d2b032e21496b74a1c8e
@@ -1 +0,0 @@
-89edeb404e507d640cb13903acff6953199704a2
@@ -0,0 +1 @@
+e8742a44ef4849a17d5e59ef36e9a52a8f2370c2
@@ -1 +0,0 @@
-02d0e1f5a9df15ac911ad495bad5ea253ab50a9f
@@ -0,0 +1 @@
+7ce2e4948fb66393a34f4200a6131cfde43e47bd
@@ -1 +0,0 @@
-eb7938233c8103223069c7b5b5f785b4d20ddafa
@@ -0,0 +1 @@
+6c1c385a597ce797b0049d9b2281b09593e1488a
@@ -1 +0,0 @@
-e979fb02155cbe81a8d335d6dc41d2ef06be68b6
@@ -0,0 +1 @@
+fafaa22906c067e6894f9f2b18ad03ded98e2f38
@@ -1 +0,0 @@
-257387c45c6fa2b77fd6931751f93fdcd798ced4
@@ -0,0 +1 @@
+19c64a84617f42bb4c11b1e266df4009cd37fdd0
@@ -1 +0,0 @@
-3cf5fe5402b5e34b240b73501c9e97a82428259e
@@ -0,0 +1 @@
+bc8613fb61c0ae95dd3680b0f65e3380c3fd0d6c
@@ -1 +0,0 @@
-1b77ef3740dc885c62d5966fbe9aea1199d344fb
@@ -0,0 +1 @@
+0fa2c3e722294e863f3c70a15e97a18397391fb4
@@ -1 +0,0 @@
-aa94b4a8636b3633008640cc5155ad354aebcea5
@@ -0,0 +1 @@
+db74c6313965ffdd10d9b19be2eed4ae2c76d2e3
@@ -1 +0,0 @@
-ed5d8ee5cd7edcad5d4ffca2b4540ccc844e9bb0
@@ -0,0 +1 @@
+b85ae1121b5fd56df985615a3cdd7b3879e9b92d
@@ -1 +1 @@
 84ccf145ac2215e6bfa63baa3101c0af41017cfc
@ -0,0 +1,291 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
* contributor license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright ownership.
|
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
* (the "License"); you may not use this file except in compliance with
|
||||||
|
* the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.lucene.analysis.synonym;
|
||||||
|
|
||||||
|
import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
|
||||||
|
|
||||||
|
import org.apache.lucene.analysis.TokenStream;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
||||||
|
import org.apache.lucene.util.BytesRef;
|
||||||
|
import org.apache.lucene.util.IntsRef;
|
||||||
|
import org.apache.lucene.util.automaton.Automaton;
|
||||||
|
import org.apache.lucene.util.automaton.FiniteStringsIterator;
|
||||||
|
import org.apache.lucene.util.automaton.Operations;
|
||||||
|
import org.apache.lucene.util.automaton.Transition;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a list of {@link TokenStream} where each stream is the tokens that make up a finite string in graph token stream. To do this,
|
||||||
|
* the graph token stream is converted to an {@link Automaton} and from there we use a {@link FiniteStringsIterator} to collect the various
|
||||||
|
* token streams for each finite string.
|
||||||
|
*/
|
||||||
|
public class GraphTokenStreamFiniteStrings {
|
||||||
|
private final Automaton.Builder builder;
|
||||||
|
Automaton det;
|
||||||
|
private final Map<BytesRef, Integer> termToID = new HashMap<>();
|
||||||
|
private final Map<Integer, BytesRef> idToTerm = new HashMap<>();
|
||||||
|
private int anyTermID = -1;
|
||||||
|
|
||||||
|
public GraphTokenStreamFiniteStrings() {
|
||||||
|
this.builder = new Automaton.Builder();
|
||||||
|
}
|
||||||
|
|
||||||
|
private static class BytesRefArrayTokenStream extends TokenStream {
|
||||||
|
private final BytesTermAttribute termAtt = addAttribute(BytesTermAttribute.class);
|
||||||
|
private final BytesRef[] terms;
|
||||||
|
private int offset;
|
||||||
|
|
||||||
|
BytesRefArrayTokenStream(BytesRef[] terms) {
|
||||||
|
this.terms = terms;
|
||||||
|
offset = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean incrementToken() throws IOException {
|
||||||
|
if (offset < terms.length) {
|
||||||
|
clearAttributes();
|
||||||
|
termAtt.setBytesRef(terms[offset]);
|
||||||
|
offset = offset + 1;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the list of token streams, one per finite string in the graph token stream.
|
||||||
|
*/
|
||||||
|
public List<TokenStream> getTokenStreams(final TokenStream in) throws IOException {
|
||||||
|
// build the automaton
|
||||||
|
build(in);
|
||||||
|
|
||||||
|
List<TokenStream> tokenStreams = new ArrayList<>();
|
||||||
|
final FiniteStringsIterator finiteStrings = new FiniteStringsIterator(det);
|
||||||
|
for (IntsRef string; (string = finiteStrings.next()) != null; ) {
|
||||||
|
final BytesRef[] tokens = new BytesRef[string.length];
|
||||||
|
for (int idx = string.offset, len = string.offset + string.length; idx < len; idx++) {
|
||||||
|
tokens[idx - string.offset] = idToTerm.get(string.ints[idx]);
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenStreams.add(new BytesRefArrayTokenStream(tokens));
|
||||||
|
}
|
||||||
|
|
||||||
|
return tokenStreams;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void build(final TokenStream in) throws IOException {
|
||||||
|
if (det != null) {
|
||||||
|
throw new IllegalStateException("Automaton already built");
|
||||||
|
}
|
||||||
|
|
||||||
|
final TermToBytesRefAttribute termBytesAtt = in.addAttribute(TermToBytesRefAttribute.class);
|
||||||
|
final PositionIncrementAttribute posIncAtt = in.addAttribute(PositionIncrementAttribute.class);
|
||||||
|
final PositionLengthAttribute posLengthAtt = in.addAttribute(PositionLengthAttribute.class);
|
||||||
|
final OffsetAttribute offsetAtt = in.addAttribute(OffsetAttribute.class);
|
||||||
|
|
||||||
|
in.reset();
|
||||||
|
|
||||||
|
int pos = -1;
|
||||||
|
int lastPos = 0;
|
||||||
|
int maxOffset = 0;
|
||||||
|
int maxPos = -1;
|
||||||
|
int state = -1;
|
||||||
|
while (in.incrementToken()) {
|
||||||
|
int posInc = posIncAtt.getPositionIncrement();
|
||||||
|
assert pos > -1 || posInc > 0;
|
||||||
|
|
||||||
|
if (posInc > 1) {
|
||||||
|
throw new IllegalArgumentException("cannot handle holes; to accept any term, use '*' term");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (posInc > 0) {
|
||||||
|
// New node:
|
||||||
|
pos += posInc;
|
||||||
|
}
|
||||||
|
|
||||||
|
int endPos = pos + posLengthAtt.getPositionLength();
|
||||||
|
while (state < endPos) {
|
||||||
|
state = createState();
|
||||||
|
}
|
||||||
|
|
||||||
|
BytesRef term = termBytesAtt.getBytesRef();
|
||||||
|
//System.out.println(pos + "-" + endPos + ": " + term.utf8ToString() + ": posInc=" + posInc);
|
||||||
|
if (term.length == 1 && term.bytes[term.offset] == (byte) '*') {
|
||||||
|
addAnyTransition(pos, endPos);
|
||||||
|
} else {
|
||||||
|
addTransition(pos, endPos, term);
|
||||||
|
}
|
||||||
|
|
||||||
|
maxOffset = Math.max(maxOffset, offsetAtt.endOffset());
|
||||||
|
maxPos = Math.max(maxPos, endPos);
|
||||||
|
}
|
||||||
|
|
||||||
|
in.end();
|
||||||
|
|
||||||
|
// TODO: look at endOffset? ts2a did...
|
||||||
|
|
||||||
|
// TODO: this (setting "last" state as the only accept state) may be too simplistic?
|
||||||
|
setAccept(state, true);
|
||||||
|
finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a new state; state 0 is always the initial state.
|
||||||
|
*/
|
||||||
|
private int createState() {
|
||||||
|
return builder.createState();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Marks the specified state as accept or not.
|
||||||
|
*/
|
||||||
|
private void setAccept(int state, boolean accept) {
|
||||||
|
builder.setAccept(state, accept);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a transition to the automaton.
|
||||||
|
*/
|
||||||
|
private void addTransition(int source, int dest, String term) {
|
||||||
|
addTransition(source, dest, new BytesRef(term));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a transition to the automaton.
|
||||||
|
*/
|
||||||
|
private void addTransition(int source, int dest, BytesRef term) {
|
||||||
|
if (term == null) {
|
||||||
|
throw new NullPointerException("term should not be null");
|
||||||
|
}
|
||||||
|
builder.addTransition(source, dest, getTermID(term));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adds a transition matching any term.
|
||||||
|
*/
|
||||||
|
private void addAnyTransition(int source, int dest) {
|
||||||
|
builder.addTransition(source, dest, getTermID(null));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Call this once you are done adding states/transitions.
|
||||||
|
*/
|
||||||
|
private void finish() {
|
||||||
|
finish(DEFAULT_MAX_DETERMINIZED_STATES);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Call this once you are done adding states/transitions.
|
||||||
|
*
|
||||||
|
* @param maxDeterminizedStates Maximum number of states created when determinizing the automaton. Higher numbers allow this operation
|
||||||
|
* to consume more memory but allow more complex automatons.
|
||||||
|
*/
|
||||||
|
private void finish(int maxDeterminizedStates) {
|
||||||
|
Automaton automaton = builder.finish();
|
||||||
|
|
||||||
|
// System.out.println("before det:\n" + automaton.toDot());
|
||||||
|
|
||||||
|
Transition t = new Transition();
|
||||||
|
|
||||||
|
// TODO: should we add "eps back to initial node" for all states,
|
||||||
|
// and det that? then we don't need to revisit initial node at
|
||||||
|
// every position? but automaton could blow up? And, this makes it
|
||||||
|
// harder to skip useless positions at search time?
|
||||||
|
|
||||||
|
if (anyTermID != -1) {
|
||||||
|
|
||||||
|
// Make sure there are no leading or trailing ANY:
|
||||||
|
int count = automaton.initTransition(0, t);
|
||||||
|
for (int i = 0; i < count; i++) {
|
||||||
|
automaton.getNextTransition(t);
|
||||||
|
if (anyTermID >= t.min && anyTermID <= t.max) {
|
||||||
|
throw new IllegalStateException("automaton cannot lead with an ANY transition");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int numStates = automaton.getNumStates();
|
||||||
|
for (int i = 0; i < numStates; i++) {
|
||||||
|
count = automaton.initTransition(i, t);
|
||||||
|
for (int j = 0; j < count; j++) {
|
||||||
|
automaton.getNextTransition(t);
|
||||||
|
if (automaton.isAccept(t.dest) && anyTermID >= t.min && anyTermID <= t.max) {
|
||||||
|
throw new IllegalStateException("automaton cannot end with an ANY transition");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int termCount = termToID.size();
|
||||||
|
|
||||||
|
// We have to carefully translate these transitions so automaton
|
||||||
|
// realizes they also match all other terms:
|
||||||
|
Automaton newAutomaton = new Automaton();
|
||||||
|
for (int i = 0; i < numStates; i++) {
|
||||||
|
newAutomaton.createState();
|
||||||
|
newAutomaton.setAccept(i, automaton.isAccept(i));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < numStates; i++) {
|
||||||
|
count = automaton.initTransition(i, t);
|
||||||
|
for (int j = 0; j < count; j++) {
|
||||||
|
automaton.getNextTransition(t);
|
||||||
|
int min, max;
|
||||||
|
if (t.min <= anyTermID && anyTermID <= t.max) {
|
||||||
|
// Match any term
|
||||||
|
min = 0;
|
||||||
|
max = termCount - 1;
|
||||||
|
} else {
|
||||||
|
min = t.min;
|
||||||
|
max = t.max;
|
||||||
|
}
|
||||||
|
newAutomaton.addTransition(t.source, t.dest, min, max);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
newAutomaton.finishState();
|
||||||
|
automaton = newAutomaton;
|
||||||
|
}
|
||||||
|
|
||||||
|
det = Operations.removeDeadStates(Operations.determinize(automaton, maxDeterminizedStates));
|
||||||
|
}
|
||||||
|
|
||||||
|
private int getTermID(BytesRef term) {
|
||||||
|
Integer id = termToID.get(term);
|
||||||
|
if (id == null) {
|
||||||
|
id = termToID.size();
|
||||||
|
if (term != null) {
|
||||||
|
term = BytesRef.deepCopyOf(term);
|
||||||
|
}
|
||||||
|
termToID.put(term, id);
|
||||||
|
idToTerm.put(id, term);
|
||||||
|
if (term == null) {
|
||||||
|
anyTermID = id;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return id;
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,588 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
* contributor license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright ownership.
|
||||||
|
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
* (the "License"); you may not use this file except in compliance with
|
||||||
|
* the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.lucene.analysis.synonym;
|
||||||
|
|
||||||
|
import org.apache.lucene.analysis.TokenFilter;
|
||||||
|
import org.apache.lucene.analysis.TokenStream;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
|
||||||
|
import org.apache.lucene.store.ByteArrayDataInput;
|
||||||
|
import org.apache.lucene.util.AttributeSource;
|
||||||
|
import org.apache.lucene.util.BytesRef;
|
||||||
|
import org.apache.lucene.util.CharsRefBuilder;
|
||||||
|
import org.apache.lucene.util.RollingBuffer;
|
||||||
|
import org.apache.lucene.util.fst.FST;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.LinkedList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
// TODO: maybe we should resolve token -> wordID then run
|
||||||
|
// FST on wordIDs, for better perf?
|
||||||
|
|
||||||
|
// TODO: a more efficient approach would be Aho/Corasick's
|
||||||
|
// algorithm
|
||||||
|
// http://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm
|
||||||
|
// It improves over the current approach here
|
||||||
|
// because it does not fully re-start matching at every
|
||||||
|
// token. For example if one pattern is "a b c x"
|
||||||
|
// and another is "b c d" and the input is "a b c d", on
|
||||||
|
// trying to parse "a b c x" but failing when you got to x,
|
||||||
|
// rather than starting over again you really should
|
||||||
|
// immediately recognize that "b c d" matches at the next
|
||||||
|
// input. I suspect this won't matter that much in
|
||||||
|
// practice, but it's possible on some set of synonyms it
|
||||||
|
// will. We'd have to modify Aho/Corasick to enforce our
|
||||||
|
// conflict resolving (eg greedy matching) because that algo
|
||||||
|
// finds all matches. This really amounts to adding a .*
|
||||||
|
// closure to the FST and then determinizing it.
|
||||||
|
//
|
||||||
|
// Another possible solution is described at http://www.cis.uni-muenchen.de/people/Schulz/Pub/dictle5.ps
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Applies single- or multi-token synonyms from a {@link SynonymMap}
|
||||||
|
* to an incoming {@link TokenStream}, producing a fully correct graph
|
||||||
|
* output. This is a replacement for {@link SynonymFilter}, which produces
|
||||||
|
* incorrect graphs for multi-token synonyms.
|
||||||
|
*
|
||||||
|
* <b>NOTE</b>: this cannot consume an incoming graph; results will
|
||||||
|
* be undefined.
|
||||||
|
*/
|
||||||
|
public final class SynonymGraphFilter extends TokenFilter {
|
||||||
|
|
||||||
|
public static final String TYPE_SYNONYM = "SYNONYM";
|
||||||
|
public static final int GRAPH_FLAG = 8;
|
||||||
|
|
||||||
|
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||||
|
private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
|
||||||
|
private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
|
||||||
|
private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
|
||||||
|
|
||||||
|
private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
|
||||||
|
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
|
||||||
|
|
||||||
|
private final SynonymMap synonyms;
|
||||||
|
private final boolean ignoreCase;
|
||||||
|
|
||||||
|
private final FST<BytesRef> fst;
|
||||||
|
|
||||||
|
private final FST.BytesReader fstReader;
|
||||||
|
private final FST.Arc<BytesRef> scratchArc;
|
||||||
|
private final ByteArrayDataInput bytesReader = new ByteArrayDataInput();
|
||||||
|
private final BytesRef scratchBytes = new BytesRef();
|
||||||
|
private final CharsRefBuilder scratchChars = new CharsRefBuilder();
|
||||||
|
private final LinkedList<BufferedOutputToken> outputBuffer = new LinkedList<>();
|
||||||
|
|
||||||
|
private int nextNodeOut;
|
||||||
|
private int lastNodeOut;
|
||||||
|
private int maxLookaheadUsed;
|
||||||
|
|
||||||
|
// For testing:
|
||||||
|
private int captureCount;
|
||||||
|
|
||||||
|
private boolean liveToken;
|
||||||
|
|
||||||
|
// Start/end offset of the current match:
|
||||||
|
private int matchStartOffset;
|
||||||
|
private int matchEndOffset;
|
||||||
|
|
||||||
|
// True once the input TokenStream is exhausted:
|
||||||
|
private boolean finished;
|
||||||
|
|
||||||
|
private int lookaheadNextRead;
|
||||||
|
private int lookaheadNextWrite;
|
||||||
|
|
||||||
|
private RollingBuffer<BufferedInputToken> lookahead = new RollingBuffer<BufferedInputToken>() {
|
||||||
|
@Override
|
||||||
|
protected BufferedInputToken newInstance() {
|
||||||
|
return new BufferedInputToken();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static class BufferedInputToken implements RollingBuffer.Resettable {
|
||||||
|
final CharsRefBuilder term = new CharsRefBuilder();
|
||||||
|
AttributeSource.State state;
|
||||||
|
int startOffset = -1;
|
||||||
|
int endOffset = -1;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void reset() {
|
||||||
|
state = null;
|
||||||
|
term.clear();
|
||||||
|
|
||||||
|
// Intentionally invalid to ferret out bugs:
|
||||||
|
startOffset = -1;
|
||||||
|
endOffset = -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static class BufferedOutputToken {
|
||||||
|
final String term;
|
||||||
|
|
||||||
|
// Non-null if this was an incoming token:
|
||||||
|
final State state;
|
||||||
|
|
||||||
|
final int startNode;
|
||||||
|
final int endNode;
|
||||||
|
|
||||||
|
public BufferedOutputToken(State state, String term, int startNode, int endNode) {
|
||||||
|
this.state = state;
|
||||||
|
this.term = term;
|
||||||
|
this.startNode = startNode;
|
||||||
|
this.endNode = endNode;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public SynonymGraphFilter(TokenStream input, SynonymMap synonyms, boolean ignoreCase) {
|
||||||
|
super(input);
|
||||||
|
this.synonyms = synonyms;
|
||||||
|
this.fst = synonyms.fst;
|
||||||
|
if (fst == null) {
|
||||||
|
throw new IllegalArgumentException("fst must be non-null");
|
||||||
|
}
|
||||||
|
this.fstReader = fst.getBytesReader();
|
||||||
|
scratchArc = new FST.Arc<>();
|
||||||
|
this.ignoreCase = ignoreCase;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean incrementToken() throws IOException {
|
||||||
|
//System.out.println("\nS: incrToken lastNodeOut=" + lastNodeOut + " nextNodeOut=" + nextNodeOut);
|
||||||
|
|
||||||
|
assert lastNodeOut <= nextNodeOut;
|
||||||
|
|
||||||
|
if (outputBuffer.isEmpty() == false) {
|
||||||
|
// We still have pending outputs from a prior synonym match:
|
||||||
|
releaseBufferedToken();
|
||||||
|
//System.out.println(" syn: ret buffered=" + this);
|
||||||
|
assert liveToken == false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to parse a new synonym match at the current token:
|
||||||
|
|
||||||
|
if (parse()) {
|
||||||
|
// A new match was found:
|
||||||
|
releaseBufferedToken();
|
||||||
|
//System.out.println(" syn: after parse, ret buffered=" + this);
|
||||||
|
assert liveToken == false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (lookaheadNextRead == lookaheadNextWrite) {
|
||||||
|
|
||||||
|
// Fast path: parse pulled one token, but it didn't match
|
||||||
|
// the start for any synonym, so we now return it "live" w/o having
|
||||||
|
// cloned all of its atts:
|
||||||
|
if (finished) {
|
||||||
|
//System.out.println(" syn: ret END");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert liveToken;
|
||||||
|
liveToken = false;
|
||||||
|
|
||||||
|
// NOTE: no need to change posInc since it's relative, i.e. whatever
|
||||||
|
// node our output is upto will just increase by the incoming posInc.
|
||||||
|
// We also don't need to change posLen, but only because we cannot
|
||||||
|
// consume a graph, so the incoming token can never span a future
|
||||||
|
// synonym match.
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// We still have buffered lookahead tokens from a previous
|
||||||
|
// parse attempt that required lookahead; just replay them now:
|
||||||
|
//System.out.println(" restore buffer");
|
||||||
|
assert lookaheadNextRead < lookaheadNextWrite : "read=" + lookaheadNextRead + " write=" + lookaheadNextWrite;
|
||||||
|
BufferedInputToken token = lookahead.get(lookaheadNextRead);
|
||||||
|
lookaheadNextRead++;
|
||||||
|
|
||||||
|
restoreState(token.state);
|
||||||
|
|
||||||
|
lookahead.freeBefore(lookaheadNextRead);
|
||||||
|
|
||||||
|
//System.out.println(" after restore offset=" + offsetAtt.startOffset() + "-" + offsetAtt.endOffset());
|
||||||
|
assert liveToken == false;
|
||||||
|
}
|
||||||
|
|
||||||
|
lastNodeOut += posIncrAtt.getPositionIncrement();
|
||||||
|
nextNodeOut = lastNodeOut + posLenAtt.getPositionLength();
|
||||||
|
|
||||||
|
//System.out.println(" syn: ret lookahead=" + this);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void releaseBufferedToken() throws IOException {
|
||||||
|
//System.out.println(" releaseBufferedToken");
|
||||||
|
|
||||||
|
BufferedOutputToken token = outputBuffer.pollFirst();
|
||||||
|
|
||||||
|
if (token.state != null) {
|
||||||
|
// This is an original input token (keepOrig=true case):
|
||||||
|
//System.out.println(" hasState");
|
||||||
|
restoreState(token.state);
|
||||||
|
//System.out.println(" startOffset=" + offsetAtt.startOffset() + " endOffset=" + offsetAtt.endOffset());
|
||||||
|
} else {
|
||||||
|
clearAttributes();
|
||||||
|
//System.out.println(" no state");
|
||||||
|
termAtt.append(token.term);
|
||||||
|
|
||||||
|
// We better have a match already:
|
||||||
|
assert matchStartOffset != -1;
|
||||||
|
|
||||||
|
offsetAtt.setOffset(matchStartOffset, matchEndOffset);
|
||||||
|
//System.out.println(" startOffset=" + matchStartOffset + " endOffset=" + matchEndOffset);
|
||||||
|
typeAtt.setType(TYPE_SYNONYM);
|
||||||
|
}
|
||||||
|
|
||||||
|
//System.out.println(" lastNodeOut=" + lastNodeOut);
|
||||||
|
//System.out.println(" term=" + termAtt);
|
||||||
|
|
||||||
|
posIncrAtt.setPositionIncrement(token.startNode - lastNodeOut);
|
||||||
|
lastNodeOut = token.startNode;
|
||||||
|
posLenAtt.setPositionLength(token.endNode - token.startNode);
|
||||||
|
flagsAtt.setFlags(flagsAtt.getFlags() | GRAPH_FLAG); // set the graph flag
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Scans the next input token(s) to see if a synonym matches. Returns true
|
||||||
|
* if a match was found.
|
||||||
|
*/
|
||||||
|
private boolean parse() throws IOException {
|
||||||
|
// System.out.println(Thread.currentThread().getName() + ": S: parse: " + System.identityHashCode(this));
|
||||||
|
|
||||||
|
// Holds the longest match we've seen so far:
|
||||||
|
BytesRef matchOutput = null;
|
||||||
|
int matchInputLength = 0;
|
||||||
|
|
||||||
|
BytesRef pendingOutput = fst.outputs.getNoOutput();
|
||||||
|
fst.getFirstArc(scratchArc);
|
||||||
|
|
||||||
|
assert scratchArc.output == fst.outputs.getNoOutput();
|
||||||
|
|
||||||
|
// How many tokens in the current match
|
||||||
|
int matchLength = 0;
|
||||||
|
boolean doFinalCapture = false;
|
||||||
|
|
||||||
|
int lookaheadUpto = lookaheadNextRead;
|
||||||
|
matchStartOffset = -1;
|
||||||
|
|
||||||
|
byToken:
|
||||||
|
while (true) {
|
||||||
|
//System.out.println(" cycle lookaheadUpto=" + lookaheadUpto + " maxPos=" + lookahead.getMaxPos());
|
||||||
|
|
||||||
|
// Pull next token's chars:
|
||||||
|
final char[] buffer;
|
||||||
|
final int bufferLen;
|
||||||
|
final int inputEndOffset;
|
||||||
|
|
||||||
|
if (lookaheadUpto <= lookahead.getMaxPos()) {
|
||||||
|
// Still in our lookahead buffer
|
||||||
|
BufferedInputToken token = lookahead.get(lookaheadUpto);
|
||||||
|
lookaheadUpto++;
|
||||||
|
buffer = token.term.chars();
|
||||||
|
bufferLen = token.term.length();
|
||||||
|
inputEndOffset = token.endOffset;
|
||||||
|
//System.out.println(" use buffer now max=" + lookahead.getMaxPos());
|
||||||
|
if (matchStartOffset == -1) {
|
||||||
|
matchStartOffset = token.startOffset;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
|
||||||
|
// We used up our lookahead buffer of input tokens
|
||||||
|
// -- pull next real input token:
|
||||||
|
|
||||||
|
assert finished || liveToken == false;
|
||||||
|
|
||||||
|
if (finished) {
|
||||||
|
//System.out.println(" break: finished");
|
||||||
|
break;
|
||||||
|
} else if (input.incrementToken()) {
|
||||||
|
//System.out.println(" input.incrToken");
|
||||||
|
liveToken = true;
|
||||||
|
buffer = termAtt.buffer();
|
||||||
|
bufferLen = termAtt.length();
|
||||||
|
if (matchStartOffset == -1) {
|
||||||
|
matchStartOffset = offsetAtt.startOffset();
|
||||||
|
}
|
||||||
|
inputEndOffset = offsetAtt.endOffset();
|
||||||
|
|
||||||
|
lookaheadUpto++;
|
||||||
|
} else {
|
||||||
|
// No more input tokens
|
||||||
|
finished = true;
|
||||||
|
//System.out.println(" break: now set finished");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
matchLength++;
|
||||||
|
//System.out.println(" cycle term=" + new String(buffer, 0, bufferLen));
|
||||||
|
|
||||||
|
// Run each char in this token through the FST:
|
||||||
|
int bufUpto = 0;
|
||||||
|
while (bufUpto < bufferLen) {
|
||||||
|
final int codePoint = Character.codePointAt(buffer, bufUpto, bufferLen);
|
||||||
|
if (fst.findTargetArc(ignoreCase ? Character.toLowerCase(codePoint) : codePoint, scratchArc, scratchArc, fstReader) ==
|
||||||
|
null) {
|
||||||
|
break byToken;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accum the output
|
||||||
|
pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output);
|
||||||
|
bufUpto += Character.charCount(codePoint);
|
||||||
|
}
|
||||||
|
|
||||||
|
assert bufUpto == bufferLen;
|
||||||
|
|
||||||
|
// OK, entire token matched; now see if this is a final
|
||||||
|
// state in the FST (a match):
|
||||||
|
if (scratchArc.isFinal()) {
|
||||||
|
matchOutput = fst.outputs.add(pendingOutput, scratchArc.nextFinalOutput);
|
||||||
|
matchInputLength = matchLength;
|
||||||
|
matchEndOffset = inputEndOffset;
|
||||||
|
//System.out.println(" ** match");
|
||||||
|
}
|
||||||
|
|
||||||
|
// See if the FST can continue matching (ie, needs to
|
||||||
|
// see the next input token):
|
||||||
|
if (fst.findTargetArc(SynonymMap.WORD_SEPARATOR, scratchArc, scratchArc, fstReader) == null) {
|
||||||
|
// No further rules can match here; we're done
|
||||||
|
// searching for matching rules starting at the
|
||||||
|
// current input position.
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
// More matching is possible -- accum the output (if
|
||||||
|
// any) of the WORD_SEP arc:
|
||||||
|
pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output);
|
||||||
|
doFinalCapture = true;
|
||||||
|
if (liveToken) {
|
||||||
|
capture();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (doFinalCapture && liveToken && finished == false) {
|
||||||
|
// Must capture the final token if we captured any prior tokens:
|
||||||
|
capture();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (matchOutput != null) {
|
||||||
|
|
||||||
|
if (liveToken) {
|
||||||
|
// Single input token synonym; we must buffer it now:
|
||||||
|
capture();
|
||||||
|
}
|
||||||
|
|
||||||
|
// There is a match!
|
||||||
|
bufferOutputTokens(matchOutput, matchInputLength);
|
||||||
|
lookaheadNextRead += matchInputLength;
|
||||||
|
//System.out.println(" precmatch; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos());
|
||||||
|
lookahead.freeBefore(lookaheadNextRead);
|
||||||
|
//System.out.println(" match; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos());
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
//System.out.println(" no match; lookaheadNextRead=" + lookaheadNextRead);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
//System.out.println(" parse done inputSkipCount=" + inputSkipCount + " nextRead=" + nextRead + " nextWrite=" + nextWrite);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Expands the output graph into the necessary tokens, adding
|
||||||
|
* synonyms as side paths parallel to the input tokens, and
|
||||||
|
* buffers them in the output token buffer.
|
||||||
|
*/
|
||||||
|
private void bufferOutputTokens(BytesRef bytes, int matchInputLength) {
|
||||||
|
bytesReader.reset(bytes.bytes, bytes.offset, bytes.length);
|
||||||
|
|
||||||
|
final int code = bytesReader.readVInt();
|
||||||
|
final boolean keepOrig = (code & 0x1) == 0;
|
||||||
|
//System.out.println(" buffer: keepOrig=" + keepOrig + " matchInputLength=" + matchInputLength);
|
||||||
|
|
||||||
|
// How many nodes along all paths; we need this to assign the
|
||||||
|
// node ID for the final end node where all paths merge back:
|
||||||
|
int totalPathNodes;
|
||||||
|
if (keepOrig) {
|
||||||
|
assert matchInputLength > 0;
|
||||||
|
totalPathNodes = matchInputLength - 1;
|
||||||
|
} else {
|
||||||
|
totalPathNodes = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// How many synonyms we will insert over this match:
|
||||||
|
final int count = code >>> 1;
|
||||||
|
|
||||||
|
// TODO: we could encode this instead into the FST:
|
||||||
|
|
||||||
|
// 1st pass: count how many new nodes we need
|
||||||
|
List<List<String>> paths = new ArrayList<>();
|
||||||
|
for (int outputIDX = 0; outputIDX < count; outputIDX++) {
|
||||||
|
int wordID = bytesReader.readVInt();
|
||||||
|
synonyms.words.get(wordID, scratchBytes);
|
||||||
|
scratchChars.copyUTF8Bytes(scratchBytes);
|
||||||
|
int lastStart = 0;
|
||||||
|
|
||||||
|
List<String> path = new ArrayList<>();
|
||||||
|
paths.add(path);
|
||||||
|
int chEnd = scratchChars.length();
|
||||||
|
for (int chUpto = 0; chUpto <= chEnd; chUpto++) {
|
||||||
|
if (chUpto == chEnd || scratchChars.charAt(chUpto) == SynonymMap.WORD_SEPARATOR) {
|
||||||
|
path.add(new String(scratchChars.chars(), lastStart, chUpto - lastStart));
|
||||||
|
lastStart = 1 + chUpto;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert path.size() > 0;
|
||||||
|
totalPathNodes += path.size() - 1;
|
||||||
|
}
|
||||||
|
//System.out.println(" totalPathNodes=" + totalPathNodes);
|
||||||
|
|
||||||
|
// 2nd pass: buffer tokens for the graph fragment
|
||||||
|
|
||||||
|
// NOTE: totalPathNodes will be 0 in the case where the matched
|
||||||
|
// input is a single token and all outputs are also a single token
|
||||||
|
|
||||||
|
// We "spawn" a side-path for each of the outputs for this matched
|
||||||
|
// synonym, all ending back at this end node:
|
||||||
|
|
||||||
|
int startNode = nextNodeOut;
|
||||||
|
|
||||||
|
int endNode = startNode + totalPathNodes + 1;
|
||||||
|
//System.out.println(" " + paths.size() + " new side-paths");
|
||||||
|
|
||||||
|
// First, fanout all tokens departing start node for these new side paths:
|
||||||
|
int newNodeCount = 0;
|
||||||
|
for (List<String> path : paths) {
|
||||||
|
int pathEndNode;
|
||||||
|
//System.out.println(" path size=" + path.size());
|
||||||
|
if (path.size() == 1) {
|
||||||
|
// Single token output, so there are no intermediate nodes:
|
||||||
|
pathEndNode = endNode;
|
||||||
|
} else {
|
||||||
|
pathEndNode = nextNodeOut + newNodeCount + 1;
|
||||||
|
newNodeCount += path.size() - 1;
|
||||||
|
}
|
||||||
|
outputBuffer.add(new BufferedOutputToken(null, path.get(0), startNode, pathEndNode));
|
||||||
|
}
|
||||||
|
|
||||||
|
// We must do the original tokens last, else the offsets "go backwards":
|
||||||
|
if (keepOrig) {
|
||||||
|
BufferedInputToken token = lookahead.get(lookaheadNextRead);
|
||||||
|
int inputEndNode;
|
||||||
|
if (matchInputLength == 1) {
|
||||||
|
// Single token matched input, so there are no intermediate nodes:
|
||||||
|
inputEndNode = endNode;
|
||||||
|
} else {
|
||||||
|
inputEndNode = nextNodeOut + newNodeCount + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
//System.out.println(" keepOrig first token: " + token.term);
|
||||||
|
|
||||||
|
outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), startNode, inputEndNode));
|
||||||
|
}
|
||||||
|
|
||||||
|
nextNodeOut = endNode;
|
||||||
|
|
||||||
|
// Do full side-path for each syn output:
|
||||||
|
for (int pathID = 0; pathID < paths.size(); pathID++) {
|
||||||
|
List<String> path = paths.get(pathID);
|
||||||
|
if (path.size() > 1) {
|
||||||
|
int lastNode = outputBuffer.get(pathID).endNode;
|
||||||
|
for (int i = 1; i < path.size() - 1; i++) {
|
||||||
|
outputBuffer.add(new BufferedOutputToken(null, path.get(i), lastNode, lastNode + 1));
|
||||||
|
lastNode++;
|
||||||
|
}
|
||||||
|
outputBuffer.add(new BufferedOutputToken(null, path.get(path.size() - 1), lastNode, endNode));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (keepOrig && matchInputLength > 1) {
|
||||||
|
// Do full "side path" with the original tokens:
|
||||||
|
int lastNode = outputBuffer.get(paths.size()).endNode;
|
||||||
|
for (int i = 1; i < matchInputLength - 1; i++) {
|
||||||
|
BufferedInputToken token = lookahead.get(lookaheadNextRead + i);
|
||||||
|
outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, lastNode + 1));
|
||||||
|
lastNode++;
|
||||||
|
}
|
||||||
|
BufferedInputToken token = lookahead.get(lookaheadNextRead + matchInputLength - 1);
|
||||||
|
outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, endNode));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
System.out.println(" after buffer: " + outputBuffer.size() + " tokens:");
|
||||||
|
for(BufferedOutputToken token : outputBuffer) {
|
||||||
|
System.out.println(" tok: " + token.term + " startNode=" + token.startNode + " endNode=" + token.endNode);
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Buffers the current input token into lookahead buffer.
|
||||||
|
*/
|
||||||
|
private void capture() {
|
||||||
|
assert liveToken;
|
||||||
|
liveToken = false;
|
||||||
|
BufferedInputToken token = lookahead.get(lookaheadNextWrite);
|
||||||
|
lookaheadNextWrite++;
|
||||||
|
|
||||||
|
token.state = captureState();
|
||||||
|
token.startOffset = offsetAtt.startOffset();
|
||||||
|
token.endOffset = offsetAtt.endOffset();
|
||||||
|
assert token.term.length() == 0;
|
||||||
|
token.term.append(termAtt);
|
||||||
|
|
||||||
|
captureCount++;
|
||||||
|
maxLookaheadUsed = Math.max(maxLookaheadUsed, lookahead.getBufferSize());
|
||||||
|
//System.out.println(" maxLookaheadUsed=" + maxLookaheadUsed);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void reset() throws IOException {
|
||||||
|
super.reset();
|
||||||
|
lookahead.reset();
|
||||||
|
lookaheadNextWrite = 0;
|
||||||
|
lookaheadNextRead = 0;
|
||||||
|
captureCount = 0;
|
||||||
|
lastNodeOut = -1;
|
||||||
|
nextNodeOut = 0;
|
||||||
|
matchStartOffset = -1;
|
||||||
|
matchEndOffset = -1;
|
||||||
|
finished = false;
|
||||||
|
liveToken = false;
|
||||||
|
outputBuffer.clear();
|
||||||
|
maxLookaheadUsed = 0;
|
||||||
|
//System.out.println("S: reset");
|
||||||
|
}
|
||||||
|
|
||||||
|
// for testing
|
||||||
|
int getCaptureCount() {
|
||||||
|
return captureCount;
|
||||||
|
}
|
||||||
|
|
||||||
|
// for testing
|
||||||
|
int getMaxLookaheadUsed() {
|
||||||
|
return maxLookaheadUsed;
|
||||||
|
}
|
||||||
|
}
|
|
@@ -41,7 +41,6 @@ import org.apache.lucene.util.automaton.RegExp;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.index.mapper.LegacyDateFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.StringFieldType;
@@ -336,11 +335,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
 BytesRef part1Binary = part1 == null ? null : getAnalyzer().normalize(field, part1);
 BytesRef part2Binary = part2 == null ? null : getAnalyzer().normalize(field, part2);
 Query rangeQuery;
-if (currentFieldType instanceof LegacyDateFieldMapper.DateFieldType && settings.timeZone() != null) {
+if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) {
-LegacyDateFieldMapper.DateFieldType dateFieldType = (LegacyDateFieldMapper.DateFieldType) this.currentFieldType;
-rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary,
-startInclusive, endInclusive, settings.timeZone(), null, context);
-} else if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) {
 DateFieldMapper.DateFieldType dateFieldType = (DateFieldMapper.DateFieldType) this.currentFieldType;
 rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary,
 startInclusive, endInclusive, settings.timeZone(), null, context);
@@ -0,0 +1,115 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.lucene.search;
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+/**
+* A query that wraps multiple sub-queries generated from a graph token stream.
+*/
+public final class GraphQuery extends Query {
+private final Query[] queries;
+private final boolean hasBoolean;
+
+/**
+* Constructor sets the queries and checks if any of them are
+* a boolean query.
+*
+* @param queries the non-null array of queries
+*/
+public GraphQuery(Query... queries) {
+this.queries = Objects.requireNonNull(queries).clone();
+for (Query query : queries) {
+if (query instanceof BooleanQuery) {
+hasBoolean = true;
+return;
+}
+}
+hasBoolean = false;
+}
+
+/**
+* Gets the queries
+*
+* @return unmodifiable list of Query
+*/
+public List<Query> getQueries() {
+return Collections.unmodifiableList(Arrays.asList(queries));
+}
+
+/**
+* If there is at least one boolean query or not.
+*
+* @return true if there is a boolean, false if not
+*/
+public boolean hasBoolean() {
+return hasBoolean;
+}
+
+/**
+* Rewrites to a single query or a boolean query where each query is a SHOULD clause.
+*/
+@Override
+public Query rewrite(IndexReader reader) throws IOException {
+if (queries.length == 0) {
+return new BooleanQuery.Builder().build();
+}
+
+if (queries.length == 1) {
+return queries[0];
+}
+
+BooleanQuery.Builder q = new BooleanQuery.Builder();
+q.setDisableCoord(true);
+for (Query clause : queries) {
+q.add(clause, BooleanClause.Occur.SHOULD);
+}
+
+return q.build();
+}
+
+@Override
+public String toString(String field) {
+StringBuilder builder = new StringBuilder("Graph(");
+for (int i = 0; i < queries.length; i++) {
+if (i != 0) {
+builder.append(", ");
+}
+builder.append(Objects.toString(queries[i]));
+}
+builder.append(")");
+return builder.toString();
+}
+
+@Override
+public boolean equals(Object other) {
+return sameClassAs(other) &&
+Arrays.equals(queries, ((GraphQuery) other).queries);
+}
+
+@Override
+public int hashCode() {
+return 31 * classHash() + Arrays.hashCode(queries);
+}
+}
@@ -719,10 +719,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145,
 UNKNOWN_VERSION_ADDED),
 TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,
-org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_0_UNRELEASED),
+org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1_UNRELEASED),
 SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
-org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2_UNRELEASED);
+org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2);
-
 
 final Class<? extends ElasticsearchException> exceptionClass;
 final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
@@ -95,15 +95,18 @@ public class Version {
 public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
 public static final int V_5_0_1_ID = 5000199;
 public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
-public static final int V_5_0_2_ID_UNRELEASED = 5000299;
+public static final int V_5_0_2_ID = 5000299;
-public static final Version V_5_0_2_UNRELEASED = new Version(V_5_0_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_2_1);
+public static final Version V_5_0_2 = new Version(V_5_0_2_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
-public static final int V_5_1_0_ID_UNRELEASED = 5010099;
+public static final int V_5_0_3_ID_UNRELEASED = 5000399;
-public static final Version V_5_1_0_UNRELEASED = new Version(V_5_1_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
+public static final Version V_5_0_3_UNRELEASED = new Version(V_5_0_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
+// no version constant for 5.1.0 due to inadvertent release
+public static final int V_5_1_1_ID_UNRELEASED = 5010199;
+public static final Version V_5_1_1_UNRELEASED = new Version(V_5_1_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
 public static final int V_5_2_0_ID_UNRELEASED = 5020099;
 public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
 public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
 public static final Version V_6_0_0_alpha1_UNRELEASED =
-new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
+new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
 public static final Version CURRENT = V_6_0_0_alpha1_UNRELEASED;
 
 // unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
@@ -123,10 +126,12 @@ public class Version {
 return V_6_0_0_alpha1_UNRELEASED;
 case V_5_2_0_ID_UNRELEASED:
 return V_5_2_0_UNRELEASED;
-case V_5_1_0_ID_UNRELEASED:
+case V_5_1_1_ID_UNRELEASED:
-return V_5_1_0_UNRELEASED;
+return V_5_1_1_UNRELEASED;
-case V_5_0_2_ID_UNRELEASED:
+case V_5_0_3_ID_UNRELEASED:
-return V_5_0_2_UNRELEASED;
+return V_5_0_3_UNRELEASED;
+case V_5_0_2_ID:
+return V_5_0_2;
 case V_5_0_1_ID:
 return V_5_0_1;
 case V_5_0_0_ID:
@@ -213,12 +218,17 @@ public class Version {
 }
 
 /**
-* Returns the smallest version between the 2.
+* Returns the minimum version between the 2.
 */
-public static Version smallest(Version version1, Version version2) {
+public static Version min(Version version1, Version version2) {
 return version1.id < version2.id ? version1 : version2;
 }
 
+/**
+* Returns the maximum version between the 2
+*/
+public static Version max(Version version1, Version version2) { return version1.id > version2.id ? version1 : version2; }
+
 /**
 * Returns the version given its string representation, current version if the argument is null or empty
 */
@@ -321,7 +331,22 @@ public class Version {
 bwcMajor = major;
 bwcMinor = 0;
 }
-return Version.smallest(this, fromId(bwcMajor * 1000000 + bwcMinor * 10000 + 99));
+return Version.min(this, fromId(bwcMajor * 1000000 + bwcMinor * 10000 + 99));
+}
+
+/**
+* Returns the minimum created index version that this version supports. Indices created with lower versions
+* can't be used with this version.
+*/
+public Version minimumIndexCompatibilityVersion() {
+final int bwcMajor;
+if (major == 5) {
+bwcMajor = 2; // we jumped from 2 to 5
+} else {
+bwcMajor = major - 1;
+}
+final int bwcMinor = 0;
+return Version.min(this, fromId(bwcMajor * 1000000 + bwcMinor * 10000 + 99));
 }
 
 /**
@@ -409,5 +434,4 @@ public class Version {
 public boolean isRelease() {
 return build == 99;
 }
-
 }
@@ -19,18 +19,11 @@
 
 package org.elasticsearch.action;
 
-import java.util.List;
 /**
-* Needs to be implemented by all {@link org.elasticsearch.action.ActionRequest} subclasses that are composed of multiple subrequests
-* which relate to one or more indices. Allows to retrieve those subrequests and reason about them separately. A composite request is
-* executed by its own transport action class (e.g. {@link org.elasticsearch.action.search.TransportMultiSearchAction}), which goes
-* through all the subrequests and delegates their exection to the appropriate transport action (e.g.
-* {@link org.elasticsearch.action.search.TransportSearchAction}) for each single item.
+* Marker interface that needs to be implemented by all {@link org.elasticsearch.action.ActionRequest} subclasses that are composed of
+* multiple sub-requests which relate to one or more indices. A composite request is executed by its own transport action class
+* (e.g. {@link org.elasticsearch.action.search.TransportMultiSearchAction}), which goes through all sub-requests and delegates their
+* execution to the appropriate transport action (e.g. {@link org.elasticsearch.action.search.TransportSearchAction}) for each single item.
 */
 public interface CompositeIndicesRequest {
-/**
-* Returns the subrequests that a composite request is composed of
-*/
-List<? extends IndicesRequest> subRequests();
 }
@@ -134,7 +134,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
         routing = in.readOptionalString();
         preference = in.readOptionalString();

-        if (in.getVersion().onOrBefore(Version.V_5_1_0_UNRELEASED)) {
+        if (in.getVersion().onOrBefore(Version.V_5_1_1_UNRELEASED)) {
             //types
             in.readStringArray();
         }

@@ -153,7 +153,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
         out.writeOptionalString(routing);
         out.writeOptionalString(preference);

-        if (out.getVersion().onOrBefore(Version.V_5_1_0_UNRELEASED)) {
+        if (out.getVersion().onOrBefore(Version.V_5_1_1_UNRELEASED)) {
             //types
             out.writeStringArray(Strings.EMPTY_ARRAY);
         }

@@ -71,7 +71,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
         for (int i = 0; i < nodes.length; i++) {
             nodes[i] = new DiscoveryNode(in);
         }
-        if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
             int size = in.readVInt();
             indicesAndFilters = new HashMap<>();
             for (int i = 0; i < size; i++) {

@@ -93,7 +93,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
         for (DiscoveryNode node : nodes) {
             node.writeTo(out);
         }
-        if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
             out.writeVInt(indicesAndFilters.size());
             for (Map.Entry<String, AliasFilter> entry : indicesAndFilters.entrySet()) {
                 out.writeString(entry.getKey());

@@ -115,7 +115,8 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
             String index = entry.getKey();
             builder.startObject(index);
             AliasFilter aliasFilter = entry.getValue();
-            if (aliasFilter.getQueryBuilder() != null) {
+            if (aliasFilter.getAliases().length > 0) {
+                builder.array("aliases", aliasFilter.getAliases());
                 builder.field("filter");
                 aliasFilter.getQueryBuilder().toXContent(builder, params);
             }

@@ -211,11 +211,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
         return this.requests;
     }

-    @Override
-    public List<? extends IndicesRequest> subRequests() {
-        return requests.stream().collect(Collectors.toList());
-    }
-
     /**
      * The list of optional payloads associated with requests in the same order as the requests. Note, elements within
      * it might be null if no payload has been provided.

@@ -305,8 +300,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
         String parent = null;
         FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
         String[] fields = defaultFields;
-        String timestamp = null;
-        TimeValue ttl = null;
         String opType = null;
         long version = Versions.MATCH_ANY;
         VersionType versionType = VersionType.INTERNAL;

@@ -336,14 +329,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
                             routing = parser.text();
                         } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
                             parent = parser.text();
-                        } else if ("_timestamp".equals(currentFieldName) || "timestamp".equals(currentFieldName)) {
-                            timestamp = parser.text();
-                        } else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
-                            if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
-                                ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName);
-                            } else {
-                                ttl = new TimeValue(parser.longValue());
-                            }
                         } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
                             opType = parser.text();
                         } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {

@@ -394,15 +379,15 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
                 // of index request.
                 if ("index".equals(action)) {
                     if (opType == null) {
-                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
+                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType)
                                 .setPipeline(pipeline).source(data.slice(from, nextMarker - from)), payload);
                     } else {
-                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
+                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType)
                                 .create("create".equals(opType)).setPipeline(pipeline)
                                 .source(data.slice(from, nextMarker - from)), payload);
                     }
                 } else if ("create".equals(action)) {
-                    internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).timestamp(timestamp).ttl(ttl).version(version).versionType(versionType)
+                    internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType)
                             .create(true).setPipeline(pipeline)
                             .source(data.slice(from, nextMarker - from)), payload);
                 } else if ("update".equals(action)) {

@@ -420,15 +405,11 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques

                     IndexRequest upsertRequest = updateRequest.upsertRequest();
                     if (upsertRequest != null) {
-                        upsertRequest.timestamp(timestamp);
-                        upsertRequest.ttl(ttl);
                         upsertRequest.version(version);
                         upsertRequest.versionType(versionType);
                     }
                     IndexRequest doc = updateRequest.doc();
                     if (doc != null) {
-                        doc.timestamp(timestamp);
-                        doc.ttl(ttl);
                         doc.version(version);
                         doc.versionType(versionType);
                     }

@@ -284,11 +284,6 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
         return validationException;
     }

-    @Override
-    public List<? extends IndicesRequest> subRequests() {
-        return items;
-    }
-
     /**
      * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
      * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or

@@ -20,10 +20,10 @@
 package org.elasticsearch.action.index;

 import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.RoutingMissingException;
-import org.elasticsearch.action.TimestampParsingException;
 import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.metadata.MappingMetaData;

@@ -41,7 +41,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.mapper.TimestampFieldMapper;

 import java.io.IOException;
 import java.nio.charset.StandardCharsets;

@@ -75,10 +74,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
     private String routing;
     @Nullable
     private String parent;
-    @Nullable
-    private String timestamp;
-    @Nullable
-    private TimeValue ttl;

     private BytesReference source;

@@ -164,12 +159,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
             validationException = addValidationError("version type [force] may no longer be used", validationException);
         }

-        if (ttl != null) {
-            if (ttl.millis() < 0) {
-                validationException = addValidationError("ttl must not be negative", validationException);
-            }
-        }
-
         if (id != null && id.getBytes(StandardCharsets.UTF_8).length > 512) {
             validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
                 id.getBytes(StandardCharsets.UTF_8).length, validationException);

@@ -265,49 +254,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         return this.parent;
     }

-    /**
-     * Sets the timestamp either as millis since the epoch, or, in the configured date format.
-     */
-    public IndexRequest timestamp(String timestamp) {
-        this.timestamp = timestamp;
-        return this;
-    }
-
-    public String timestamp() {
-        return this.timestamp;
-    }
-
-    /**
-     * Sets the ttl value as a time value expression.
-     */
-    public IndexRequest ttl(String ttl) {
-        this.ttl = TimeValue.parseTimeValue(ttl, null, "ttl");
-        return this;
-    }
-
-    /**
-     * Sets the ttl as a {@link TimeValue} instance.
-     */
-    public IndexRequest ttl(TimeValue ttl) {
-        this.ttl = ttl;
-        return this;
-    }
-
-    /**
-     * Sets the relative ttl value in milliseconds. It musts be greater than 0 as it makes little sense otherwise.
-     */
-    public IndexRequest ttl(long ttl) {
-        this.ttl = new TimeValue(ttl);
-        return this;
-    }
-
-    /**
-     * Returns the ttl as a {@link TimeValue}
-     */
-    public TimeValue ttl() {
-        return this.ttl;
-    }
-
     /**
      * Sets the ingest pipeline to be executed before indexing the document
      */

@@ -537,11 +483,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement


     public void process(@Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) {
-        // resolve timestamp if provided externally
-        if (timestamp != null) {
-            timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp,
-                mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
-        }
         if (mappingMd != null) {
             // might as well check for routing here
             if (mappingMd.routing().required() && routing == null) {

@@ -563,30 +504,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
             autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
             id(UUIDs.base64UUID());
         }
-
-        // generate timestamp if not provided, we always have one post this stage...
-        if (timestamp == null) {
-            String defaultTimestamp = TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP;
-            if (mappingMd != null && mappingMd.timestamp() != null) {
-                // If we explicitly ask to reject null timestamp
-                if (mappingMd.timestamp().ignoreMissing() != null && mappingMd.timestamp().ignoreMissing() == false) {
-                    throw new TimestampParsingException("timestamp is required by mapping");
-                }
-                defaultTimestamp = mappingMd.timestamp().defaultTimestamp();
-            }
-
-            if (defaultTimestamp.equals(TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP)) {
-                timestamp = Long.toString(System.currentTimeMillis());
-            } else {
-                // if we are here, the defaultTimestamp is not
-                // TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP but
-                // this can only happen if defaultTimestamp was
-                // assigned again because mappingMd and
-                // mappingMd#timestamp() are not null
-                assert mappingMd != null;
-                timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter());
-            }
-        }
     }

     /* resolve the routing if needed */

@@ -601,8 +518,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         id = in.readOptionalString();
         routing = in.readOptionalString();
         parent = in.readOptionalString();
-        timestamp = in.readOptionalString();
-        ttl = in.readOptionalWriteable(TimeValue::new);
+        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            in.readOptionalString(); // timestamp
+            in.readOptionalWriteable(TimeValue::new); // ttl
+        }
         source = in.readBytesReference();
         opType = OpType.fromId(in.readByte());
         version = in.readLong();

@@ -619,8 +538,10 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         out.writeOptionalString(id);
         out.writeOptionalString(routing);
         out.writeOptionalString(parent);
-        out.writeOptionalString(timestamp);
-        out.writeOptionalWriteable(ttl);
+        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            out.writeOptionalString(null);
+            out.writeOptionalWriteable(null);
+        }
         out.writeBytesReference(source);
         out.writeByte(opType.getId());
         out.writeLong(version);

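The readFrom/writeTo hunks above keep the stream format compatible with pre-6.0 peers: the removed timestamp/ttl slots are still consumed on read and written as null placeholders when the other side's version is older. A self-contained sketch of that general pattern, using plain java.io streams and an assumed version constant rather than StreamInput/StreamOutput:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Hypothetical illustration of dropping a field while staying wire-compatible with older peers.
    final class WireCompatSketch {
        static final int V_FIELD_REMOVED = 6_000_001; // assumed packed version id

        static void write(DataOutputStream out, int peerVersion, String routing) throws IOException {
            out.writeUTF(routing);
            if (peerVersion < V_FIELD_REMOVED) {
                out.writeBoolean(false); // "not present" placeholder where the legacy optional field lived
            }
        }

        static String read(DataInputStream in, int peerVersion) throws IOException {
            String routing = in.readUTF();
            if (peerVersion < V_FIELD_REMOVED) {
                if (in.readBoolean()) { // old peers may still send the legacy field...
                    in.readUTF();       // ...so consume and discard it
                }
            }
            return routing;
        }
    }
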
@@ -25,7 +25,6 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;

@@ -231,38 +230,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
         return this;
     }

-    /**
-     * Sets the timestamp either as millis since the epoch, or, in the configured date format.
-     */
-    public IndexRequestBuilder setTimestamp(String timestamp) {
-        request.timestamp(timestamp);
-        return this;
-    }
-
-    /**
-     * Sets the ttl value as a time value expression.
-     */
-    public IndexRequestBuilder setTTL(String ttl) {
-        request.ttl(ttl);
-        return this;
-    }
-
-    /**
-     * Sets the relative ttl value in milliseconds. It musts be greater than 0 as it makes little sense otherwise.
-     */
-    public IndexRequestBuilder setTTL(long ttl) {
-        request.ttl(ttl);
-        return this;
-    }
-
-    /**
-     * Sets the ttl as a {@link TimeValue} instance.
-     */
-    public IndexRequestBuilder setTTL(TimeValue ttl) {
-        request.ttl(ttl);
-        return this;
-    }
-
     /**
      * Sets the ingest pipeline to be executed before indexing the document
      */

@@ -171,7 +171,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
     public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) {
         final ShardId shardId = replica.shardId();
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source())
-            .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
+            .routing(request.routing()).parent(request.parent());

         final Engine.Index operation;
         try {

@@ -189,7 +189,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
     /** Utility method to prepare an index operation on primary shards */
     static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source())
-            .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
+            .routing(request.routing()).parent(request.parent());
         return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
     }

@@ -156,8 +156,6 @@ public class SimulatePipelineRequest extends ActionRequest {
                 ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.ID.getFieldName(), "_id"),
                 ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.ROUTING.getFieldName()),
                 ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.PARENT.getFieldName()),
-                ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.TIMESTAMP.getFieldName()),
-                ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.TTL.getFieldName()),
                 document);
             ingestDocumentList.add(ingestDocument);
         }

@@ -22,7 +22,6 @@ package org.elasticsearch.action.search;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.CompositeIndicesRequest;
-import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -84,11 +83,6 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice
         return this.requests;
     }

-    @Override
-    public List<? extends IndicesRequest> subRequests() {
-        return this.requests;
-    }
-
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;

@@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.CompositeIndicesRequest;
-import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.RealtimeRequest;
 import org.elasticsearch.action.ValidateActions;
 import org.elasticsearch.common.Nullable;

@@ -76,11 +75,6 @@ public class MultiTermVectorsRequest extends ActionRequest implements Iterable<T
         return validationException;
     }

-    @Override
-    public List<? extends IndicesRequest> subRequests() {
-        return requests;
-    }
-
     @Override
     public Iterator<TermVectorsRequest> iterator() {
         return Collections.unmodifiableCollection(requests).iterator();

@@ -31,7 +31,6 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;

@@ -42,8 +41,6 @@ import org.elasticsearch.index.get.GetField;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
-import org.elasticsearch.index.mapper.TTLFieldMapper;
-import org.elasticsearch.index.mapper.TimestampFieldMapper;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.ExecutableScript;

@@ -55,7 +52,6 @@ import org.elasticsearch.search.lookup.SourceLookup;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.function.LongSupplier;

@@ -76,7 +72,7 @@ public class UpdateHelper extends AbstractComponent {
      */
     public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) {
         final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
-                new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
+                new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME},
                 true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
         return prepare(indexShard.shardId(), request, getResult, nowInMillis);
     }

@@ -86,13 +82,11 @@ public class UpdateHelper extends AbstractComponent {
      */
     @SuppressWarnings("unchecked")
     protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult getResult, LongSupplier nowInMillis) {
-        long getDateNS = System.nanoTime();
         if (!getResult.isExists()) {
             if (request.upsertRequest() == null && !request.docAsUpsert()) {
                 throw new DocumentMissingException(shardId, request.type(), request.id());
             }
             IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
-            TimeValue ttl = indexRequest.ttl();
             if (request.scriptedUpsert() && request.script() != null) {
                 // Run the script to perform the create logic
                 IndexRequest upsert = request.upsertRequest();

@@ -103,10 +97,6 @@ public class UpdateHelper extends AbstractComponent {
                 ctx.put("_source", upsertDoc);
                 ctx.put("_now", nowInMillis.getAsLong());
                 ctx = executeScript(request.script, ctx);
-                //Allow the script to set TTL using ctx._ttl
-                if (ttl == null) {
-                    ttl = getTTLFromScriptContext(ctx);
-                }

                 //Allow the script to abort the create by setting "op" to "none"
                 String scriptOpChoice = (String) ctx.get("op");

@@ -129,7 +119,6 @@ public class UpdateHelper extends AbstractComponent {
             indexRequest.index(request.index()).type(request.type()).id(request.id())
                     // it has to be a "create!"
                     .create(true)
-                    .ttl(ttl)
                     .setRefreshPolicy(request.getRefreshPolicy())
                     .routing(request.routing())
                     .parent(request.parent())

@@ -155,8 +144,6 @@ public class UpdateHelper extends AbstractComponent {

         Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
         String operation = null;
-        String timestamp = null;
-        TimeValue ttl = null;
         final Map<String, Object> updatedSourceAsMap;
         final XContentType updateSourceContentType = sourceAndContent.v1();
         String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;

@@ -165,10 +152,6 @@ public class UpdateHelper extends AbstractComponent {
         if (request.script() == null && request.doc() != null) {
             IndexRequest indexRequest = request.doc();
             updatedSourceAsMap = sourceAndContent.v2();
-            if (indexRequest.ttl() != null) {
-                ttl = indexRequest.ttl();
-            }
-            timestamp = indexRequest.timestamp();
             if (indexRequest.routing() != null) {
                 routing = indexRequest.routing();
             }

@@ -184,16 +167,12 @@ public class UpdateHelper extends AbstractComponent {
             }
         } else {
             Map<String, Object> ctx = new HashMap<>(16);
-            Long originalTtl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
-            Long originalTimestamp = getResult.getFields().containsKey(TimestampFieldMapper.NAME) ? (Long) getResult.field(TimestampFieldMapper.NAME).getValue() : null;
             ctx.put("_index", getResult.getIndex());
             ctx.put("_type", getResult.getType());
             ctx.put("_id", getResult.getId());
             ctx.put("_version", getResult.getVersion());
             ctx.put("_routing", routing);
             ctx.put("_parent", parent);
-            ctx.put("_timestamp", originalTimestamp);
-            ctx.put("_ttl", originalTtl);
             ctx.put("_source", sourceAndContent.v2());
             ctx.put("_now", nowInMillis.getAsLong());

@@ -201,34 +180,14 @@ public class UpdateHelper extends AbstractComponent {

             operation = (String) ctx.get("op");

-            Object fetchedTimestamp = ctx.get("_timestamp");
-            if (fetchedTimestamp != null) {
-                timestamp = fetchedTimestamp.toString();
-            } else if (originalTimestamp != null) {
-                // No timestamp has been given in the update script, so we keep the previous timestamp if there is one
-                timestamp = originalTimestamp.toString();
-            }
-
-            ttl = getTTLFromScriptContext(ctx);
-
             updatedSourceAsMap = (Map<String, Object>) ctx.get("_source");
         }

-        // apply script to update the source
-        // No TTL has been given in the update script so we keep previous TTL value if there is one
-        if (ttl == null) {
-            Long ttlAsLong = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
-            if (ttlAsLong != null) {
-                ttl = new TimeValue(ttlAsLong - TimeValue.nsecToMSec(System.nanoTime() - getDateNS));// It is an approximation of exact TTL value, could be improved
-            }
-        }
-
         if (operation == null || "index".equals(operation)) {
             final IndexRequest indexRequest = Requests.indexRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
                     .source(updatedSourceAsMap, updateSourceContentType)
                     .version(updateVersion).versionType(request.versionType())
                     .waitForActiveShards(request.waitForActiveShards())
-                    .timestamp(timestamp).ttl(ttl)
                     .setRefreshPolicy(request.getRefreshPolicy());
             return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType);
         } else if ("delete".equals(operation)) {

@@ -263,17 +222,6 @@ public class UpdateHelper extends AbstractComponent {
         return ctx;
     }

-    private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
-        Object fetchedTTL = ctx.get("_ttl");
-        if (fetchedTTL != null) {
-            if (fetchedTTL instanceof Number) {
-                return new TimeValue(((Number) fetchedTTL).longValue());
-            }
-            return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
-        }
-        return null;
-    }
-
     /**
      * Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in a update response.
      * For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in a update response

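With the _ttl/_timestamp handling removed, the script context built in prepare() is down to _index, _type, _id, _version, _routing, _parent, _source and _now, and only op and _source are read back afterwards. A minimal sketch of that round trip, with a hard-coded stand-in where the script would run (not the real ScriptService):

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical illustration of the populate-ctx / run-script / read-back flow in UpdateHelper.prepare.
    final class UpdateCtxSketch {
        @SuppressWarnings("unchecked")
        public static void main(String[] args) {
            Map<String, Object> source = new HashMap<>();
            source.put("counter", 1);

            Map<String, Object> ctx = new HashMap<>();
            ctx.put("_id", "1");
            ctx.put("_source", source);
            ctx.put("_now", System.currentTimeMillis());

            // stand-in for ctx = executeScript(request.script, ctx)
            ((Map<String, Object>) ctx.get("_source")).put("counter", 2);
            ctx.put("op", "index"); // a script may also choose "none" or "delete"

            String operation = (String) ctx.get("op");
            Map<String, Object> updatedSourceAsMap = (Map<String, Object>) ctx.get("_source");
            System.out.println(operation + " -> " + updatedSourceAsMap);
        }
    }
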
@@ -28,7 +28,6 @@ import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;

@@ -355,33 +354,4 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
         return this;
     }

-    /**
-     * Set the new ttl of the document as a long. Note that if detectNoop is true (the default)
-     * and the source of the document isn't changed then the ttl update won't take
-     * effect.
-     */
-    public UpdateRequestBuilder setTtl(Long ttl) {
-        request.doc().ttl(ttl);
-        return this;
-    }
-
-    /**
-     * Set the new ttl of the document as a time value expression. Note that if detectNoop is true (the default)
-     * and the source of the document isn't changed then the ttl update won't take
-     * effect.
-     */
-    public UpdateRequestBuilder setTtl(String ttl) {
-        request.doc().ttl(ttl);
-        return this;
-    }
-
-    /**
-     * Set the new ttl of the document as a {@link TimeValue} instance. Note that if detectNoop is true (the default)
-     * and the source of the document isn't changed then the ttl update won't take
-     * effect.
-     */
-    public UpdateRequestBuilder setTtl(TimeValue ttl) {
-        request.doc().ttl(ttl);
-        return this;
-    }
 }

@@ -42,6 +42,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.ConnectionProfile;
 import org.elasticsearch.transport.FutureTransportResponseHandler;
 import org.elasticsearch.transport.NodeDisconnectedException;
 import org.elasticsearch.transport.NodeNotConnectedException;

@@ -389,9 +390,9 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
                 try {
                     // its a listed node, light connect to it...
                     logger.trace("connecting to listed node (light) [{}]", listedNode);
-                    transportService.connectToNodeLight(listedNode);
+                    transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);
                 } catch (Exception e) {
-                    logger.debug(
+                    logger.info(
                         (Supplier<?>)
                         () -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
                     hostFailureListener.onNodeDisconnected(listedNode, e);

@@ -469,7 +470,7 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
                     } else {
                         // its a listed node, light connect to it...
                         logger.trace("connecting to listed node (light) [{}]", listedNode);
-                        transportService.connectToNodeLight(listedNode);
+                        transportService.connectToNode(listedNode, ConnectionProfile.LIGHT_PROFILE);
                     }
                 } catch (Exception e) {
                     logger.debug(

@@ -224,12 +224,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
         Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope);
     public static final String INDEX_UUID_NA_VALUE = "_na_";

+    public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require";
+    public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = "index.routing.allocation.include";
+    public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = "index.routing.allocation.exclude";
     public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
-        Setting.groupSetting("index.routing.allocation.require.", Property.Dynamic, Property.IndexScope);
+        Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", Property.Dynamic, Property.IndexScope);
     public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
-        Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope);
+        Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.IndexScope);
     public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
-        Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope);
+        Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.IndexScope);
     public static final Setting<Settings> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =
         Setting.groupSetting("index.routing.allocation.initial_recovery."); // this is only setable internally not a registered setting!!

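The new *_PREFIX constants let callers compose full allocation-filter keys instead of repeating the literals. A hedged usage sketch; the attribute name and value here are made up:

    // Hypothetical usage of the new prefix constant; "zone" is an arbitrary node attribute.
    final class AllocationKeySketch {
        static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require";

        public static void main(String[] args) {
            String key = INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".zone";
            System.out.println(key + ": us-east-1"); // index.routing.allocation.require.zone: us-east-1
        }
    }
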
@@ -19,19 +19,17 @@

 package org.elasticsearch.cluster.metadata;

-import org.elasticsearch.action.TimestampParsingException;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.joda.FormatDateTimeFormatter;
-import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.TimestampFieldMapper;

 import java.io.IOException;
 import java.util.Map;

@@ -75,103 +73,17 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         }
     }

-    public static class Timestamp {
-
-        public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException {
-            try {
-                return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));
-            } catch (RuntimeException e) {
-                throw new TimestampParsingException(timestampAsString, e);
-            }
-        }
-
-
-        public static final Timestamp EMPTY = new Timestamp(false, TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT,
-                TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null);
-
-        private final boolean enabled;
-
-        private final String format;
-
-        private final FormatDateTimeFormatter dateTimeFormatter;
-
-        private final String defaultTimestamp;
-
-        private final Boolean ignoreMissing;
-
-        public Timestamp(boolean enabled, String format, String defaultTimestamp, Boolean ignoreMissing) {
-            this.enabled = enabled;
-            this.format = format;
-            this.dateTimeFormatter = Joda.forPattern(format);
-            this.defaultTimestamp = defaultTimestamp;
-            this.ignoreMissing = ignoreMissing;
-        }
-
-        public boolean enabled() {
-            return enabled;
-        }
-
-        public String format() {
-            return this.format;
-        }
-
-        public String defaultTimestamp() {
-            return this.defaultTimestamp;
-        }
-
-        public boolean hasDefaultTimestamp() {
-            return this.defaultTimestamp != null;
-        }
-
-        public Boolean ignoreMissing() {
-            return ignoreMissing;
-        }
-
-        public FormatDateTimeFormatter dateTimeFormatter() {
-            return this.dateTimeFormatter;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-
-            Timestamp timestamp = (Timestamp) o;
-
-            if (enabled != timestamp.enabled) return false;
-            if (format != null ? !format.equals(timestamp.format) : timestamp.format != null) return false;
-            if (defaultTimestamp != null ? !defaultTimestamp.equals(timestamp.defaultTimestamp) : timestamp.defaultTimestamp != null) return false;
-            if (ignoreMissing != null ? !ignoreMissing.equals(timestamp.ignoreMissing) : timestamp.ignoreMissing != null) return false;
-
-            return true;
-        }
-
-        @Override
-        public int hashCode() {
-            int result = (enabled ? 1 : 0);
-            result = 31 * result + (format != null ? format.hashCode() : 0);
-            result = 31 * result + (dateTimeFormatter != null ? dateTimeFormatter.hashCode() : 0);
-            result = 31 * result + (defaultTimestamp != null ? defaultTimestamp.hashCode() : 0);
-            result = 31 * result + (ignoreMissing != null ? ignoreMissing.hashCode() : 0);
-            return result;
-        }
-    }
-
     private final String type;

     private final CompressedXContent source;

     private Routing routing;
-    private Timestamp timestamp;
     private boolean hasParentField;

     public MappingMetaData(DocumentMapper docMapper) {
         this.type = docMapper.type();
         this.source = docMapper.mappingSource();
         this.routing = new Routing(docMapper.routingFieldMapper().required());
-        this.timestamp = new Timestamp(docMapper.timestampFieldMapper().enabled(),
-                docMapper.timestampFieldMapper().fieldType().dateTimeFormatter().format(), docMapper.timestampFieldMapper().defaultTimestamp(),
-                docMapper.timestampFieldMapper().ignoreMissing());
         this.hasParentField = docMapper.parentFieldMapper().active();
     }

@@ -227,29 +139,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         } else {
             this.routing = Routing.EMPTY;
         }
-        if (withoutType.containsKey("_timestamp")) {
-            boolean enabled = false;
-            String format = TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT;
-            String defaultTimestamp = TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP;
-            Boolean ignoreMissing = null;
-            Map<String, Object> timestampNode = (Map<String, Object>) withoutType.get("_timestamp");
-            for (Map.Entry<String, Object> entry : timestampNode.entrySet()) {
-                String fieldName = entry.getKey();
-                Object fieldNode = entry.getValue();
-                if (fieldName.equals("enabled")) {
-                    enabled = lenientNodeBooleanValue(fieldNode);
-                } else if (fieldName.equals("format")) {
-                    format = fieldNode.toString();
-                } else if (fieldName.equals("default") && fieldNode != null) {
-                    defaultTimestamp = fieldNode.toString();
-                } else if (fieldName.equals("ignore_missing")) {
-                    ignoreMissing = lenientNodeBooleanValue(fieldNode);
-                }
-            }
-            this.timestamp = new Timestamp(enabled, format, defaultTimestamp, ignoreMissing);
-        } else {
-            this.timestamp = Timestamp.EMPTY;
-        }
         if (withoutType.containsKey("_parent")) {
             this.hasParentField = true;
         } else {

@@ -257,11 +146,10 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         }
     }

-    public MappingMetaData(String type, CompressedXContent source, Routing routing, Timestamp timestamp, boolean hasParentField) {
+    public MappingMetaData(String type, CompressedXContent source, Routing routing, boolean hasParentField) {
         this.type = type;
         this.source = source;
         this.routing = routing;
-        this.timestamp = timestamp;
         this.hasParentField = hasParentField;
     }

@@ -269,9 +157,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         if (routing == Routing.EMPTY) {
             routing = defaultMapping.routing();
         }
-        if (timestamp == Timestamp.EMPTY) {
-            timestamp = defaultMapping.timestamp();
-        }
     }

     public String type() {

@@ -309,21 +194,19 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         return this.routing;
     }

-    public Timestamp timestamp() {
-        return this.timestamp;
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(type());
         source().writeTo(out);
         // routing
         out.writeBoolean(routing().required());
-        // timestamp
-        out.writeBoolean(timestamp().enabled());
-        out.writeString(timestamp().format());
-        out.writeOptionalString(timestamp().defaultTimestamp());
-        out.writeOptionalBoolean(timestamp().ignoreMissing());
+        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            // timestamp
+            out.writeBoolean(false); // enabled
+            out.writeString(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format());
+            out.writeOptionalString(null);
+            out.writeOptionalBoolean(null);
+        }
         out.writeBoolean(hasParentField());
     }

@@ -336,7 +219,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

         if (!routing.equals(that.routing)) return false;
         if (!source.equals(that.source)) return false;
-        if (!timestamp.equals(that.timestamp)) return false;
         if (!type.equals(that.type)) return false;

         return true;

@@ -347,7 +229,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         int result = type.hashCode();
         result = 31 * result + source.hashCode();
         result = 31 * result + routing.hashCode();
-        result = 31 * result + timestamp.hashCode();
         return result;
     }

@@ -356,18 +237,19 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         CompressedXContent source = CompressedXContent.readCompressedString(in);
         // routing
         Routing routing = new Routing(in.readBoolean());
-        // timestamp
-
-        boolean enabled = in.readBoolean();
-        String format = in.readString();
-        String defaultTimestamp = in.readOptionalString();
-        Boolean ignoreMissing = null;
-
-        ignoreMissing = in.readOptionalBoolean();
-
-        final Timestamp timestamp = new Timestamp(enabled, format, defaultTimestamp, ignoreMissing);
+        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            // timestamp
+            boolean enabled = in.readBoolean();
+            if (enabled) {
+                throw new IllegalArgumentException("_timestamp may not be enabled");
+            }
+            in.readString(); // format
+            in.readOptionalString(); // defaultTimestamp
+            in.readOptionalBoolean(); // ignoreMissing
+        }
         final boolean hasParentField = in.readBoolean();
-        return new MappingMetaData(type, source, routing, timestamp, hasParentField);
return new MappingMetaData(type, source, routing, hasParentField);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
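The writeTo/readFrom changes above keep the stream readable by pre-6.0 nodes by emitting neutral placeholder values for the removed _timestamp block whenever the destination version is older. A self-contained sketch of that version-gated pattern, using plain java.io streams and an invented WIRE_V6 constant in place of Elasticsearch's StreamOutput and Version types (the field layout is a simplification, not the real wire protocol):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative only: WIRE_V6 and the field layout are simplifications, not the real
// Elasticsearch wire protocol or StreamOutput API.
final class VersionGatedWriter {

    static final int WIRE_V6 = 6_000_000; // hypothetical numeric id for 6.0.0

    /** Serialize mapping metadata, padding the stream with legacy _timestamp defaults for old peers. */
    static byte[] writeMappingMetaData(int destinationVersion, String type,
                                       boolean routingRequired, boolean hasParent) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF(type);
            out.writeBoolean(routingRequired);
            if (destinationVersion < WIRE_V6) {
                // Pre-6.0 nodes still expect the _timestamp block; send neutral defaults.
                out.writeBoolean(false);      // enabled
                out.writeUTF("epoch_millis"); // format (stand-in value)
                out.writeBoolean(false);      // no default timestamp
                out.writeBoolean(false);      // no ignore_missing flag
            }
            out.writeBoolean(hasParent);
        }
        return bytes.toByteArray();
    }
}
```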
@ -55,7 +55,6 @@ import org.elasticsearch.gateway.MetaDataStateFormat;
|
||||||
import org.elasticsearch.index.Index;
|
import org.elasticsearch.index.Index;
|
||||||
import org.elasticsearch.index.IndexNotFoundException;
|
import org.elasticsearch.index.IndexNotFoundException;
|
||||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||||
import org.elasticsearch.indices.ttl.IndicesTTLService;
|
|
||||||
import org.elasticsearch.ingest.IngestMetadata;
|
import org.elasticsearch.ingest.IngestMetadata;
|
||||||
import org.elasticsearch.rest.RestStatus;
|
import org.elasticsearch.rest.RestStatus;
|
||||||
import org.elasticsearch.script.ScriptMetaData;
|
import org.elasticsearch.script.ScriptMetaData;
|
||||||
|
@ -761,7 +760,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||||
|
|
||||||
/** All known time cluster settings. */
|
/** All known time cluster settings. */
|
||||||
public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
|
public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
|
||||||
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(),
|
|
||||||
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
|
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
|
||||||
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
|
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
|
||||||
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
|
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
|
||||||
|
|
|
@ -316,7 +316,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
||||||
|
|
||||||
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
|
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
|
||||||
DiscoveryNodes nodes = currentState.nodes();
|
DiscoveryNodes nodes = currentState.nodes();
|
||||||
final Version createdVersion = Version.smallest(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
|
final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
|
||||||
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
|
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -20,6 +20,7 @@
|
||||||
package org.elasticsearch.cluster.metadata;
|
package org.elasticsearch.cluster.metadata;
|
||||||
|
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
|
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
|
||||||
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
|
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
|
||||||
|
@ -160,12 +161,14 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
||||||
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
||||||
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
|
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
|
||||||
.blocks(currentState.blocks());
|
.blocks(currentState.blocks());
|
||||||
|
final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion()
|
||||||
|
.minimumIndexCompatibilityVersion();
|
||||||
for (IndexMetaData closedMetaData : indicesToOpen) {
|
for (IndexMetaData closedMetaData : indicesToOpen) {
|
||||||
final String indexName = closedMetaData.getIndex().getName();
|
final String indexName = closedMetaData.getIndex().getName();
|
||||||
IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build();
|
IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build();
|
||||||
// The index might be closed because we couldn't import it due to old incompatible version
|
// The index might be closed because we couldn't import it due to old incompatible version
|
||||||
// We need to check that this index can be upgraded to the current version
|
// We need to check that this index can be upgraded to the current version
|
||||||
indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
|
indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData, minIndexCompatibilityVersion);
|
||||||
try {
|
try {
|
||||||
indicesService.verifyIndexMetadata(indexMetaData, indexMetaData);
|
indicesService.verifyIndexMetadata(indexMetaData, indexMetaData);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
|
|
|
@ -67,13 +67,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||||
* If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index
|
* If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index
|
||||||
* cannot be updated the method throws an exception.
|
* cannot be updated the method throws an exception.
|
||||||
*/
|
*/
|
||||||
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
|
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
|
||||||
// Throws an exception if there are too-old segments:
|
// Throws an exception if there are too-old segments:
|
||||||
if (isUpgraded(indexMetaData)) {
|
if (isUpgraded(indexMetaData)) {
|
||||||
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
|
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
|
||||||
return indexMetaData;
|
return indexMetaData;
|
||||||
}
|
}
|
||||||
checkSupportedVersion(indexMetaData);
|
checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion);
|
||||||
IndexMetaData newMetaData = indexMetaData;
|
IndexMetaData newMetaData = indexMetaData;
|
||||||
// we have to run this first otherwise in we try to create IndexSettings
|
// we have to run this first otherwise in we try to create IndexSettings
|
||||||
// with broken settings and fail in checkMappingsCompatibility
|
// with broken settings and fail in checkMappingsCompatibility
|
||||||
|
@ -92,21 +92,26 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Elasticsearch 5.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices
|
* Elasticsearch v6.0 no longer supports indices created pre v5.0. All indices
|
||||||
* that were created before Elasticsearch v2.0.0.beta1 should be reindexed in Elasticsearch 2.x
|
* that were created before Elasticsearch v5.0 should be re-indexed in Elasticsearch 5.x
|
||||||
* before they can be opened by this version of elasticsearch. */
|
* before they can be opened by this version of elasticsearch.
|
||||||
private void checkSupportedVersion(IndexMetaData indexMetaData) {
|
*/
|
||||||
if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) {
|
private void checkSupportedVersion(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
|
||||||
throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1."
|
if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData,
|
||||||
+ " It should be reindexed in Elasticsearch 2.x before upgrading to " + Version.CURRENT + ".");
|
minimumIndexCompatibilityVersion) == false) {
|
||||||
|
throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created with version ["
|
||||||
|
+ indexMetaData.getCreationVersion() + "] but the minimum compatible version is ["
|
||||||
|
|
||||||
|
+ minimumIndexCompatibilityVersion + "]. It should be re-indexed in Elasticsearch " + minimumIndexCompatibilityVersion.major
|
||||||
|
+ ".x before upgrading to " + Version.CURRENT + ".");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Returns true if this index can be supported by the current version of elasticsearch
|
* Returns true if this index can be supported by the current version of elasticsearch
|
||||||
*/
|
*/
|
||||||
private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
|
private static boolean isSupportedVersion(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
|
||||||
return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1);
|
return indexMetaData.getCreationVersion().onOrAfter(minimumIndexCompatibilityVersion);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -173,4 +178,4 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||||
return indexMetaData;
|
return indexMetaData;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
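The upgrade check above replaces the hard-coded v2.0.0.beta1 cutoff with a minimum index compatibility version derived from the newest node in the cluster, so the error message can name both the index's creation version and the version it should be re-indexed on. A sketch of that rule with integer major versions standing in for Elasticsearch Version objects (an assumption, not the real API):

```java
// Sketch of the compatibility rule enforced above; integer majors stand in for Version objects.
final class IndexCompatibility {

    /** One major version back: a 6.x cluster can only open indices created in 5.x or later. */
    static int minimumCompatibleMajor(int maxNodeMajor) {
        return maxNodeMajor - 1;
    }

    static void checkSupported(int indexCreatedMajor, int minimumCompatibleMajor) {
        if (indexCreatedMajor < minimumCompatibleMajor) {
            throw new IllegalStateException("index was created with version [" + indexCreatedMajor
                + ".x] but the minimum compatible version is [" + minimumCompatibleMajor
                + ".x]; re-index in " + minimumCompatibleMajor + ".x before upgrading");
        }
    }
}
```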
@ -135,7 +135,8 @@ public class DiscoveryNode implements Writeable, ToXContent {
|
||||||
*/
|
*/
|
||||||
public DiscoveryNode(String nodeName, String nodeId, TransportAddress address,
|
public DiscoveryNode(String nodeName, String nodeId, TransportAddress address,
|
||||||
Map<String, String> attributes, Set<Role> roles, Version version) {
|
Map<String, String> attributes, Set<Role> roles, Version version) {
|
||||||
this(nodeName, nodeId, UUIDs.randomBase64UUID(), address.getAddress(), address.getAddress(), address, attributes, roles, version);
|
this(nodeName, nodeId, UUIDs.randomBase64UUID(), address.address().getHostString(), address.getAddress(), address, attributes,
|
||||||
|
roles, version);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
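The constructor change above derives the node's host name via getHostString() instead of reusing the raw address for both fields; getHostString() returns the configured host name, or the literal IP when none is known, and never triggers a reverse DNS lookup. A small standalone demonstration of that java.net behaviour (the addresses and port are arbitrary examples):

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;

// Standalone java.net demonstration; the addresses and port are arbitrary examples.
public class HostStringDemo {
    public static void main(String[] args) throws Exception {
        // Built from a raw address: getHostString() prints the literal IP, no reverse lookup.
        InetSocketAddress raw =
            new InetSocketAddress(InetAddress.getByAddress(new byte[] {127, 0, 0, 1}), 9300);
        System.out.println(raw.getHostString()); // 127.0.0.1

        // Built from a host name: getHostString() returns that name as given.
        InetSocketAddress named = new InetSocketAddress("localhost", 9300);
        System.out.println(named.getHostString()); // localhost
    }
}
```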
@ -56,10 +56,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
|
||||||
private final String masterNodeId;
|
private final String masterNodeId;
|
||||||
private final String localNodeId;
|
private final String localNodeId;
|
||||||
private final Version minNonClientNodeVersion;
|
private final Version minNonClientNodeVersion;
|
||||||
|
private final Version maxNodeVersion;
|
||||||
|
private final Version minNodeVersion;
|
||||||
|
|
||||||
private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes,
|
private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes,
|
||||||
ImmutableOpenMap<String, DiscoveryNode> masterNodes, ImmutableOpenMap<String, DiscoveryNode> ingestNodes,
|
ImmutableOpenMap<String, DiscoveryNode> masterNodes, ImmutableOpenMap<String, DiscoveryNode> ingestNodes,
|
||||||
String masterNodeId, String localNodeId, Version minNonClientNodeVersion) {
|
String masterNodeId, String localNodeId, Version minNonClientNodeVersion, Version maxNodeVersion,
|
||||||
|
Version minNodeVersion) {
|
||||||
this.nodes = nodes;
|
this.nodes = nodes;
|
||||||
this.dataNodes = dataNodes;
|
this.dataNodes = dataNodes;
|
||||||
this.masterNodes = masterNodes;
|
this.masterNodes = masterNodes;
|
||||||
|
@ -67,6 +70,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
|
||||||
this.masterNodeId = masterNodeId;
|
this.masterNodeId = masterNodeId;
|
||||||
this.localNodeId = localNodeId;
|
this.localNodeId = localNodeId;
|
||||||
this.minNonClientNodeVersion = minNonClientNodeVersion;
|
this.minNonClientNodeVersion = minNonClientNodeVersion;
|
||||||
|
this.minNodeVersion = minNodeVersion;
|
||||||
|
this.maxNodeVersion = maxNodeVersion;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -235,6 +240,24 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
|
||||||
return minNonClientNodeVersion;
|
return minNonClientNodeVersion;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the version of the node with the oldest version in the cluster.
|
||||||
|
*
|
||||||
|
* @return the oldest version in the cluster
|
||||||
|
*/
|
||||||
|
public Version getMinNodeVersion() {
|
||||||
|
return minNodeVersion;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the version of the node with the youngest version in the cluster
|
||||||
|
*
|
||||||
|
* @return the newest version in the cluster
|
||||||
|
*/
|
||||||
|
public Version getMaxNodeVersion() {
|
||||||
|
return maxNodeVersion;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Resolve a node with a given id
|
* Resolve a node with a given id
|
||||||
*
|
*
|
||||||
|
@ -631,25 +654,27 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
|
||||||
ImmutableOpenMap.Builder<String, DiscoveryNode> masterNodesBuilder = ImmutableOpenMap.builder();
|
ImmutableOpenMap.Builder<String, DiscoveryNode> masterNodesBuilder = ImmutableOpenMap.builder();
|
||||||
ImmutableOpenMap.Builder<String, DiscoveryNode> ingestNodesBuilder = ImmutableOpenMap.builder();
|
ImmutableOpenMap.Builder<String, DiscoveryNode> ingestNodesBuilder = ImmutableOpenMap.builder();
|
||||||
Version minNodeVersion = Version.CURRENT;
|
Version minNodeVersion = Version.CURRENT;
|
||||||
|
Version maxNodeVersion = Version.CURRENT;
|
||||||
Version minNonClientNodeVersion = Version.CURRENT;
|
Version minNonClientNodeVersion = Version.CURRENT;
|
||||||
for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {
|
for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {
|
||||||
if (nodeEntry.value.isDataNode()) {
|
if (nodeEntry.value.isDataNode()) {
|
||||||
dataNodesBuilder.put(nodeEntry.key, nodeEntry.value);
|
dataNodesBuilder.put(nodeEntry.key, nodeEntry.value);
|
||||||
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.getVersion());
|
minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion());
|
||||||
}
|
}
|
||||||
if (nodeEntry.value.isMasterNode()) {
|
if (nodeEntry.value.isMasterNode()) {
|
||||||
masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
|
masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
|
||||||
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.getVersion());
|
minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion());
|
||||||
}
|
}
|
||||||
if (nodeEntry.value.isIngestNode()) {
|
if (nodeEntry.value.isIngestNode()) {
|
||||||
ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value);
|
ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value);
|
||||||
}
|
}
|
||||||
minNodeVersion = Version.smallest(minNodeVersion, nodeEntry.value.getVersion());
|
minNodeVersion = Version.min(minNodeVersion, nodeEntry.value.getVersion());
|
||||||
|
maxNodeVersion = Version.max(maxNodeVersion, nodeEntry.value.getVersion());
|
||||||
}
|
}
|
||||||
|
|
||||||
return new DiscoveryNodes(
|
return new DiscoveryNodes(
|
||||||
nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(),
|
nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(),
|
||||||
masterNodeId, localNodeId, minNonClientNodeVersion
|
masterNodeId, localNodeId, minNonClientNodeVersion, maxNodeVersion, minNodeVersion
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
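The builder above now walks the node map once and tracks both the oldest and the newest node version, each seeded with the local version. A plain-Java sketch of that accumulation (Semver is an illustrative record, not Elasticsearch's Version class):

```java
import java.util.List;

// Plain-Java sketch of the min/max accumulation; Semver is an illustrative record,
// not Elasticsearch's Version class.
public class VersionRange {

    record Semver(int major, int minor, int patch) implements Comparable<Semver> {
        @Override
        public int compareTo(Semver o) {
            int c = Integer.compare(major, o.major);
            if (c != 0) return c;
            c = Integer.compare(minor, o.minor);
            return c != 0 ? c : Integer.compare(patch, o.patch);
        }
    }

    public static void main(String[] args) {
        Semver current = new Semver(6, 0, 0); // both extremes start at the local version
        List<Semver> nodeVersions = List.of(new Semver(5, 6, 3), new Semver(6, 0, 0), new Semver(5, 6, 4));

        Semver min = current;
        Semver max = current;
        for (Semver v : nodeVersions) {
            if (v.compareTo(min) < 0) min = v; // oldest node version in the cluster
            if (v.compareTo(max) > 0) max = v; // newest node version in the cluster
        }
        System.out.println("min=" + min + " max=" + max);
    }
}
```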
@ -1,153 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.cluster.routing.allocation;
|
|
||||||
|
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
|
||||||
import org.elasticsearch.common.io.stream.Streamable;
|
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Instances of this class keeps explanations of decisions that have been made by allocation.
|
|
||||||
* An {@link AllocationExplanation} consists of a set of per node explanations.
|
|
||||||
* Since {@link NodeExplanation}s are related to shards an {@link AllocationExplanation} maps
|
|
||||||
* a shards id to a set of {@link NodeExplanation}s.
|
|
||||||
*/
|
|
||||||
public class AllocationExplanation implements Streamable {
|
|
||||||
|
|
||||||
public static final AllocationExplanation EMPTY = new AllocationExplanation();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Instances of this class keep messages and informations about nodes of an allocation
|
|
||||||
*/
|
|
||||||
public static class NodeExplanation {
|
|
||||||
private final DiscoveryNode node;
|
|
||||||
|
|
||||||
private final String description;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link NodeExplanation}
|
|
||||||
*
|
|
||||||
* @param node node referenced by this {@link NodeExplanation}
|
|
||||||
* @param description a message associated with the given node
|
|
||||||
*/
|
|
||||||
public NodeExplanation(DiscoveryNode node, String description) {
|
|
||||||
this.node = node;
|
|
||||||
this.description = description;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The node referenced by the explanation
|
|
||||||
* @return referenced node
|
|
||||||
*/
|
|
||||||
public DiscoveryNode node() {
|
|
||||||
return node;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the explanation for the node
|
|
||||||
* @return explanation for the node
|
|
||||||
*/
|
|
||||||
public String description() {
|
|
||||||
return description;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private final Map<ShardId, List<NodeExplanation>> explanations = new HashMap<>();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create and add a node explanation to this explanation referencing a shard
|
|
||||||
* @param shardId id the of the referenced shard
|
|
||||||
* @param nodeExplanation Explanation itself
|
|
||||||
* @return AllocationExplanation involving the explanation
|
|
||||||
*/
|
|
||||||
public AllocationExplanation add(ShardId shardId, NodeExplanation nodeExplanation) {
|
|
||||||
List<NodeExplanation> list = explanations.get(shardId);
|
|
||||||
if (list == null) {
|
|
||||||
list = new ArrayList<>();
|
|
||||||
explanations.put(shardId, list);
|
|
||||||
}
|
|
||||||
list.add(nodeExplanation);
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* List of explanations involved by this AllocationExplanation
|
|
||||||
* @return Map of shard ids and corresponding explanations
|
|
||||||
*/
|
|
||||||
public Map<ShardId, List<NodeExplanation>> explanations() {
|
|
||||||
return this.explanations;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Read an {@link AllocationExplanation} from an {@link StreamInput}
|
|
||||||
* @param in {@link StreamInput} to read from
|
|
||||||
* @return a new {@link AllocationExplanation} read from the stream
|
|
||||||
* @throws IOException if something bad happened while reading
|
|
||||||
*/
|
|
||||||
public static AllocationExplanation readAllocationExplanation(StreamInput in) throws IOException {
|
|
||||||
AllocationExplanation e = new AllocationExplanation();
|
|
||||||
e.readFrom(in);
|
|
||||||
return e;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void readFrom(StreamInput in) throws IOException {
|
|
||||||
int size = in.readVInt();
|
|
||||||
for (int i = 0; i < size; i++) {
|
|
||||||
ShardId shardId = ShardId.readShardId(in);
|
|
||||||
int size2 = in.readVInt();
|
|
||||||
List<NodeExplanation> ne = new ArrayList<>(size2);
|
|
||||||
for (int j = 0; j < size2; j++) {
|
|
||||||
DiscoveryNode node = null;
|
|
||||||
if (in.readBoolean()) {
|
|
||||||
node = new DiscoveryNode(in);
|
|
||||||
}
|
|
||||||
ne.add(new NodeExplanation(node, in.readString()));
|
|
||||||
}
|
|
||||||
explanations.put(shardId, ne);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
|
||||||
out.writeVInt(explanations.size());
|
|
||||||
for (Map.Entry<ShardId, List<NodeExplanation>> entry : explanations.entrySet()) {
|
|
||||||
entry.getKey().writeTo(out);
|
|
||||||
out.writeVInt(entry.getValue().size());
|
|
||||||
for (NodeExplanation nodeExplanation : entry.getValue()) {
|
|
||||||
if (nodeExplanation.node() == null) {
|
|
||||||
out.writeBoolean(false);
|
|
||||||
} else {
|
|
||||||
out.writeBoolean(true);
|
|
||||||
nodeExplanation.node().writeTo(out);
|
|
||||||
}
|
|
||||||
out.writeString(nodeExplanation.description());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -49,6 +49,8 @@ public class DiskThresholdSettings {
|
||||||
Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60),
|
Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60),
|
||||||
Setting.Property.Dynamic, Setting.Property.NodeScope);
|
Setting.Property.Dynamic, Setting.Property.NodeScope);
|
||||||
|
|
||||||
|
private volatile String lowWatermarkRaw;
|
||||||
|
private volatile String highWatermarkRaw;
|
||||||
private volatile Double freeDiskThresholdLow;
|
private volatile Double freeDiskThresholdLow;
|
||||||
private volatile Double freeDiskThresholdHigh;
|
private volatile Double freeDiskThresholdHigh;
|
||||||
private volatile ByteSizeValue freeBytesThresholdLow;
|
private volatile ByteSizeValue freeBytesThresholdLow;
|
||||||
|
@ -86,6 +88,7 @@ public class DiskThresholdSettings {
|
||||||
|
|
||||||
private void setLowWatermark(String lowWatermark) {
|
private void setLowWatermark(String lowWatermark) {
|
||||||
// Watermark is expressed in terms of used data, but we need "free" data watermark
|
// Watermark is expressed in terms of used data, but we need "free" data watermark
|
||||||
|
this.lowWatermarkRaw = lowWatermark;
|
||||||
this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
|
this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
|
||||||
this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark,
|
this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark,
|
||||||
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
|
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
|
||||||
|
@ -93,11 +96,26 @@ public class DiskThresholdSettings {
|
||||||
|
|
||||||
private void setHighWatermark(String highWatermark) {
|
private void setHighWatermark(String highWatermark) {
|
||||||
// Watermark is expressed in terms of used data, but we need "free" data watermark
|
// Watermark is expressed in terms of used data, but we need "free" data watermark
|
||||||
|
this.highWatermarkRaw = highWatermark;
|
||||||
this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
|
this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
|
||||||
this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark,
|
this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark,
|
||||||
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
|
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the raw (uninterpreted) low watermark value as found in the settings.
|
||||||
|
*/
|
||||||
|
public String getLowWatermarkRaw() {
|
||||||
|
return lowWatermarkRaw;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the raw (uninterpreted) high watermark value as found in the settings.
|
||||||
|
*/
|
||||||
|
public String getHighWatermarkRaw() {
|
||||||
|
return highWatermarkRaw;
|
||||||
|
}
|
||||||
|
|
||||||
public Double getFreeDiskThresholdLow() {
|
public Double getFreeDiskThresholdLow() {
|
||||||
return freeDiskThresholdLow;
|
return freeDiskThresholdLow;
|
||||||
}
|
}
|
||||||
|
|
|
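The new getters above expose the watermark exactly as the user wrote it, which matters because the same setting accepts either a maximum used-space percentage ("85%") or a minimum free size ("500mb"), and the raw string is what belongs in decision messages. A simplified sketch of how one value can carry both interpretations (not Elasticsearch's actual parser):

```java
import java.util.Locale;

// Simplified sketch, not Elasticsearch's parser: the same watermark setting accepts either
// a maximum used-space percentage ("85%") or a minimum free size ("500mb").
final class Watermark {

    final String raw;              // value exactly as configured; handy for log/decision messages
    final Double maxUsedPercent;   // set when the value is a percentage
    final Long minFreeBytes;       // set when the value is an absolute size

    Watermark(String raw) {
        this.raw = raw;
        if (raw.endsWith("%")) {
            this.maxUsedPercent = Double.parseDouble(raw.substring(0, raw.length() - 1));
            this.minFreeBytes = null;
        } else {
            this.maxUsedPercent = null;
            this.minFreeBytes = parseBytes(raw);
        }
    }

    private static long parseBytes(String value) {
        String v = value.toLowerCase(Locale.ROOT);
        if (v.endsWith("gb")) return Long.parseLong(v.substring(0, v.length() - 2)) * 1024L * 1024L * 1024L;
        if (v.endsWith("mb")) return Long.parseLong(v.substring(0, v.length() - 2)) * 1024L * 1024L;
        if (v.endsWith("kb")) return Long.parseLong(v.substring(0, v.length() - 2)) * 1024L;
        if (v.endsWith("b"))  return Long.parseLong(v.substring(0, v.length() - 1));
        throw new IllegalArgumentException("unsupported size value: " + value);
    }
}
```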
@ -31,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
import org.elasticsearch.index.shard.ShardId;
|
||||||
import org.elasticsearch.snapshots.RestoreService;
|
|
||||||
import org.elasticsearch.snapshots.RestoreService.RestoreInProgressUpdater;
|
import org.elasticsearch.snapshots.RestoreService.RestoreInProgressUpdater;
|
||||||
|
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
|
@ -61,8 +60,6 @@ public class RoutingAllocation {
|
||||||
|
|
||||||
private final ImmutableOpenMap<String, ClusterState.Custom> customs;
|
private final ImmutableOpenMap<String, ClusterState.Custom> customs;
|
||||||
|
|
||||||
private final AllocationExplanation explanation = new AllocationExplanation();
|
|
||||||
|
|
||||||
private final ClusterInfo clusterInfo;
|
private final ClusterInfo clusterInfo;
|
||||||
|
|
||||||
private Map<ShardId, Set<String>> ignoredShardToNodes = null;
|
private Map<ShardId, Set<String>> ignoredShardToNodes = null;
|
||||||
|
@ -162,14 +159,6 @@ public class RoutingAllocation {
|
||||||
return customs;
|
return customs;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Get explanations of current routing
|
|
||||||
* @return explanation of routing
|
|
||||||
*/
|
|
||||||
public AllocationExplanation explanation() {
|
|
||||||
return explanation;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void ignoreDisable(boolean ignoreDisable) {
|
public void ignoreDisable(boolean ignoreDisable) {
|
||||||
this.ignoreDisable = ignoreDisable;
|
this.ignoreDisable = ignoreDisable;
|
||||||
}
|
}
|
||||||
|
|
|
@ -87,22 +87,6 @@ public class AwarenessAllocationDecider extends AllocationDecider {
|
||||||
|
|
||||||
private volatile Map<String, String[]> forcedAwarenessAttributes;
|
private volatile Map<String, String[]> forcedAwarenessAttributes;
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link AwarenessAllocationDecider} instance
|
|
||||||
*/
|
|
||||||
public AwarenessAllocationDecider() {
|
|
||||||
this(Settings.Builder.EMPTY_SETTINGS);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link AwarenessAllocationDecider} instance from given settings
|
|
||||||
*
|
|
||||||
* @param settings {@link Settings} to use
|
|
||||||
*/
|
|
||||||
public AwarenessAllocationDecider(Settings settings) {
|
|
||||||
this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
|
|
||||||
}
|
|
||||||
|
|
||||||
public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
|
public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
|
||||||
super(settings);
|
super(settings);
|
||||||
this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
|
this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
|
||||||
|
@ -140,7 +124,9 @@ public class AwarenessAllocationDecider extends AllocationDecider {
|
||||||
|
|
||||||
private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) {
|
private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) {
|
||||||
if (awarenessAttributes.length == 0) {
|
if (awarenessAttributes.length == 0) {
|
||||||
return allocation.decision(Decision.YES, NAME, "allocation awareness is not enabled");
|
return allocation.decision(Decision.YES, NAME,
|
||||||
|
"allocation awareness is not enabled, set [%s] to enable it",
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey());
|
||||||
}
|
}
|
||||||
|
|
||||||
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
||||||
|
@ -148,7 +134,10 @@ public class AwarenessAllocationDecider extends AllocationDecider {
|
||||||
for (String awarenessAttribute : awarenessAttributes) {
|
for (String awarenessAttribute : awarenessAttributes) {
|
||||||
// the node the shard exists on must be associated with an awareness attribute
|
// the node the shard exists on must be associated with an awareness attribute
|
||||||
if (!node.node().getAttributes().containsKey(awarenessAttribute)) {
|
if (!node.node().getAttributes().containsKey(awarenessAttribute)) {
|
||||||
return allocation.decision(Decision.NO, NAME, "node does not contain the awareness attribute: [%s]", awarenessAttribute);
|
return allocation.decision(Decision.NO, NAME,
|
||||||
|
"node does not contain the awareness attribute [%s]; required attributes [%s=%s]",
|
||||||
|
awarenessAttribute, CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(),
|
||||||
|
allocation.debugDecision() ? Strings.arrayToCommaDelimitedString(awarenessAttributes) : null);
|
||||||
}
|
}
|
||||||
|
|
||||||
// build attr_value -> nodes map
|
// build attr_value -> nodes map
|
||||||
|
@ -206,15 +195,14 @@ public class AwarenessAllocationDecider extends AllocationDecider {
|
||||||
// if we are above with leftover, then we know we are not good, even with mod
|
// if we are above with leftover, then we know we are not good, even with mod
|
||||||
if (currentNodeCount > (requiredCountPerAttribute + leftoverPerAttribute)) {
|
if (currentNodeCount > (requiredCountPerAttribute + leftoverPerAttribute)) {
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"there are too many shards on the node for attribute [%s], there are [%d] total shards for the index " +
|
"there are too many copies of the shard allocated to nodes with attribute [%s], there are [%d] total configured " +
|
||||||
" and [%d] total attributes values, expected the node count [%d] to be lower or equal to the required " +
|
"shard copies for this shard id and [%d] total attribute values, expected the allocated shard count per " +
|
||||||
"number of shards per attribute [%d] plus leftover [%d]",
|
"attribute [%d] to be less than or equal to the upper bound of the required number of shards per attribute [%d]",
|
||||||
awarenessAttribute,
|
awarenessAttribute,
|
||||||
shardCount,
|
shardCount,
|
||||||
numberOfAttributes,
|
numberOfAttributes,
|
||||||
currentNodeCount,
|
currentNodeCount,
|
||||||
requiredCountPerAttribute,
|
requiredCountPerAttribute + leftoverPerAttribute);
|
||||||
leftoverPerAttribute);
|
|
||||||
}
|
}
|
||||||
// all is well, we are below or same as average
|
// all is well, we are below or same as average
|
||||||
if (currentNodeCount <= requiredCountPerAttribute) {
|
if (currentNodeCount <= requiredCountPerAttribute) {
|
||||||
|
|
|
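The reworded NO message above describes an upper bound of requiredCountPerAttribute plus leftoverPerAttribute copies per attribute value. A worked example of that bound, assuming (as a simplification) that the two quantities come from integer division and remainder of the copy count by the number of attribute values:

```java
// Worked example of the capacity bound in the message above, assuming (as a simplification)
// that requiredCountPerAttribute and leftoverPerAttribute come from integer division and remainder.
public class AwarenessBalance {

    static boolean overCapacity(int shardCopies, int attributeValues, int copiesOnThisValue) {
        int requiredPerAttribute = shardCopies / attributeValues;
        int leftover = shardCopies % attributeValues;
        return copiesOnThisValue > requiredPerAttribute + leftover; // the Decision.NO case above
    }

    public static void main(String[] args) {
        // 3 copies across 2 rack values: the upper bound per rack is 1 + 1 = 2.
        System.out.println(overCapacity(3, 2, 2)); // false -> allocation may proceed
        System.out.println(overCapacity(3, 2, 3)); // true  -> Decision.NO
    }
}
```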
@ -48,14 +48,15 @@ import org.elasticsearch.common.settings.Settings;
|
||||||
public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
||||||
|
|
||||||
public static final String NAME = "cluster_rebalance";
|
public static final String NAME = "cluster_rebalance";
|
||||||
|
private static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance";
|
||||||
public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING =
|
public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING =
|
||||||
new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT),
|
new Setting<>(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceType.INDICES_ALL_ACTIVE.toString(),
|
||||||
ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope);
|
ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* An enum representation for the configured re-balance type.
|
* An enum representation for the configured re-balance type.
|
||||||
*/
|
*/
|
||||||
public static enum ClusterRebalanceType {
|
public enum ClusterRebalanceType {
|
||||||
/**
|
/**
|
||||||
* Re-balancing is allowed once a shard replication group is active
|
* Re-balancing is allowed once a shard replication group is active
|
||||||
*/
|
*/
|
||||||
|
@ -80,6 +81,11 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
||||||
throw new IllegalArgumentException("Illegal value for " +
|
throw new IllegalArgumentException("Illegal value for " +
|
||||||
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
|
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return name().toLowerCase(Locale.ROOT);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private volatile ClusterRebalanceType type;
|
private volatile ClusterRebalanceType type;
|
||||||
|
@ -94,8 +100,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
||||||
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
|
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
|
||||||
type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
|
type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
|
||||||
}
|
}
|
||||||
logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
|
logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
|
||||||
type.toString().toLowerCase(Locale.ROOT));
|
|
||||||
|
|
||||||
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
|
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
|
||||||
}
|
}
|
||||||
|
@ -115,12 +120,14 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
||||||
// check if there are unassigned primaries.
|
// check if there are unassigned primaries.
|
||||||
if ( allocation.routingNodes().hasUnassignedPrimaries() ) {
|
if ( allocation.routingNodes().hasUnassignedPrimaries() ) {
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the cluster has unassigned primary shards and rebalance type is set to [%s]", type);
|
"the cluster has unassigned primary shards and [%s] is set to [%s]",
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
|
||||||
}
|
}
|
||||||
// check if there are initializing primaries that don't have a relocatingNodeId entry.
|
// check if there are initializing primaries that don't have a relocatingNodeId entry.
|
||||||
if ( allocation.routingNodes().hasInactivePrimaries() ) {
|
if ( allocation.routingNodes().hasInactivePrimaries() ) {
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the cluster has inactive primary shards and rebalance type is set to [%s]", type);
|
"the cluster has inactive primary shards and [%s] is set to [%s]",
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
|
||||||
}
|
}
|
||||||
|
|
||||||
return allocation.decision(Decision.YES, NAME, "all primary shards are active");
|
return allocation.decision(Decision.YES, NAME, "all primary shards are active");
|
||||||
|
@ -129,16 +136,18 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
||||||
// check if there are unassigned shards.
|
// check if there are unassigned shards.
|
||||||
if (allocation.routingNodes().hasUnassignedShards() ) {
|
if (allocation.routingNodes().hasUnassignedShards() ) {
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the cluster has unassigned shards and rebalance type is set to [%s]", type);
|
"the cluster has unassigned shards and [%s] is set to [%s]",
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
|
||||||
}
|
}
|
||||||
// in case all indices are assigned, are there initializing shards which
|
// in case all indices are assigned, are there initializing shards which
|
||||||
// are not relocating?
|
// are not relocating?
|
||||||
if ( allocation.routingNodes().hasInactiveShards() ) {
|
if ( allocation.routingNodes().hasInactiveShards() ) {
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the cluster has inactive shards and rebalance type is set to [%s]", type);
|
"the cluster has inactive shards and [%s] is set to [%s]",
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// type == Type.ALWAYS
|
// type == Type.ALWAYS
|
||||||
return allocation.decision(Decision.YES, NAME, "all shards are active, rebalance type is [%s]", type);
|
return allocation.decision(Decision.YES, NAME, "all shards are active");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
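The enum change above makes ClusterRebalanceType own its settings representation: toString() yields the lowercase value, so the setting default and the log/decision messages can print the type directly. A standalone example of that pattern (the enum below is illustrative, not the Elasticsearch class):

```java
import java.util.Locale;

// Standalone example of the enum-owns-its-setting-value pattern; not the Elasticsearch class.
enum RebalancePolicy {
    ALWAYS, INDICES_PRIMARIES_ACTIVE, INDICES_ALL_ACTIVE;

    @Override
    public String toString() {
        return name().toLowerCase(Locale.ROOT); // what ends up in settings, logs and decision messages
    }

    static RebalancePolicy parse(String value) {
        return valueOf(value.toUpperCase(Locale.ROOT)); // case-insensitive parsing
    }
}
```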
@ -66,9 +66,11 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
|
||||||
}
|
}
|
||||||
int relocatingShards = allocation.routingNodes().getRelocatingShardCount();
|
int relocatingShards = allocation.routingNodes().getRelocatingShardCount();
|
||||||
if (relocatingShards >= clusterConcurrentRebalance) {
|
if (relocatingShards >= clusterConcurrentRebalance) {
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.THROTTLE, NAME,
|
||||||
"too many shards are concurrently rebalancing [%d], limit: [%d]",
|
"reached the limit of concurrently rebalancing shards [%d], [%s=%d]",
|
||||||
relocatingShards, clusterConcurrentRebalance);
|
relocatingShards,
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(),
|
||||||
|
clusterConcurrentRebalance);
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.YES, NAME,
|
return allocation.decision(Decision.YES, NAME,
|
||||||
"below threshold [%d] for concurrent rebalances, current rebalance shard count [%d]",
|
"below threshold [%d] for concurrent rebalances, current rebalance shard count [%d]",
|
||||||
|
|
|
@ -40,6 +40,9 @@ import org.elasticsearch.common.unit.ByteSizeValue;
|
||||||
import org.elasticsearch.index.Index;
|
import org.elasticsearch.index.Index;
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
import org.elasticsearch.index.shard.ShardId;
|
||||||
|
|
||||||
|
import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING;
|
||||||
|
import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The {@link DiskThresholdDecider} checks that the node a shard is potentially
|
* The {@link DiskThresholdDecider} checks that the node a shard is potentially
|
||||||
* being allocated to has enough disk space.
|
* being allocated to has enough disk space.
|
||||||
|
@ -135,8 +138,10 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
|
diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the node is above the low watermark and has less than required [%s] free, free: [%s]",
|
"the node is above the low watermark [%s=%s], having less than the minimum required [%s] free space, actual free: [%s]",
|
||||||
diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes));
|
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getLowWatermarkRaw(),
|
||||||
|
diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes));
|
||||||
} else if (freeBytes > diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
|
} else if (freeBytes > diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
|
||||||
// Allow the shard to be allocated because it is primary that
|
// Allow the shard to be allocated because it is primary that
|
||||||
// has never been allocated if it's under the high watermark
|
// has never been allocated if it's under the high watermark
|
||||||
|
@ -146,7 +151,8 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
|
diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.YES, NAME,
|
return allocation.decision(Decision.YES, NAME,
|
||||||
"the node is above the low watermark, but this primary shard has never been allocated before");
|
"the node is above the low watermark, but less than the high watermark, and this primary shard has " +
|
||||||
|
"never been allocated before");
|
||||||
} else {
|
} else {
|
||||||
// Even though the primary has never been allocated, the node is
|
// Even though the primary has never been allocated, the node is
|
||||||
// above the high watermark, so don't allow allocating the shard
|
// above the high watermark, so don't allow allocating the shard
|
||||||
|
@ -156,9 +162,11 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
|
diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the node is above the high watermark even though this shard has never been allocated " +
|
"the node is above the high watermark [%s=%s], having less than the minimum required [%s] free space, " +
|
||||||
"and has less than required [%s] free on node, free: [%s]",
|
"actual free: [%s]",
|
||||||
diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
|
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getHighWatermarkRaw(),
|
||||||
|
diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -172,8 +180,10 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
|
Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the node is above the low watermark and has more than allowed [%s%%] used disk, free: [%s%%]",
|
"the node is above the low watermark [%s=%s], using more disk space than the maximum allowed [%s%%], " +
|
||||||
usedDiskThresholdLow, freeDiskPercentage);
|
"actual free: [%s%%]",
|
||||||
|
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getLowWatermarkRaw(), usedDiskThresholdLow, freeDiskPercentage);
|
||||||
} else if (freeDiskPercentage > diskThresholdSettings.getFreeDiskThresholdHigh()) {
|
} else if (freeDiskPercentage > diskThresholdSettings.getFreeDiskThresholdHigh()) {
|
||||||
// Allow the shard to be allocated because it is primary that
|
// Allow the shard to be allocated because it is primary that
|
||||||
// has never been allocated if it's under the high watermark
|
// has never been allocated if it's under the high watermark
|
||||||
|
@ -184,7 +194,8 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
|
Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.YES, NAME,
|
return allocation.decision(Decision.YES, NAME,
|
||||||
"the node is above the low watermark, but this primary shard has never been allocated before");
|
"the node is above the low watermark, but less than the high watermark, and this primary shard has " +
|
||||||
|
"never been allocated before");
|
||||||
} else {
|
} else {
|
||||||
// Even though the primary has never been allocated, the node is
|
// Even though the primary has never been allocated, the node is
|
||||||
// above the high watermark, so don't allow allocating the shard
|
// above the high watermark, so don't allow allocating the shard
|
||||||
|
@ -195,9 +206,10 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
Strings.format1Decimals(freeDiskPercentage, "%"), node.nodeId());
|
Strings.format1Decimals(freeDiskPercentage, "%"), node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"the node is above the high watermark even though this shard has never been allocated " +
|
"the node is above the high watermark [%s=%s], using more disk space than the maximum allowed [%s%%], " +
|
||||||
"and has more than allowed [%s%%] used disk, free: [%s%%]",
|
"actual free: [%s%%]",
|
||||||
usedDiskThresholdHigh, freeDiskPercentage);
|
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeDiskPercentage);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -210,9 +222,11 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
"{} free bytes threshold ({} bytes free), preventing allocation",
|
"{} free bytes threshold ({} bytes free), preventing allocation",
|
||||||
node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard);
|
node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard);
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"after allocating the shard to this node, it would be above the high watermark " +
|
"allocating the shard to this node will bring the node above the high watermark [%s=%s] " +
|
||||||
"and have less than required [%s] free, free: [%s]",
|
"and cause it to have less than the minimum required [%s] of free space (free bytes after shard added: [%s])",
|
||||||
diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytesAfterShard));
|
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getHighWatermarkRaw(),
|
||||||
|
diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytesAfterShard));
|
||||||
}
|
}
|
||||||
if (freeSpaceAfterShard < diskThresholdSettings.getFreeDiskThresholdHigh()) {
|
if (freeSpaceAfterShard < diskThresholdSettings.getFreeDiskThresholdHigh()) {
|
||||||
logger.warn("after allocating, node [{}] would have more than the allowed " +
|
logger.warn("after allocating, node [{}] would have more than the allowed " +
|
||||||
|
@ -220,9 +234,10 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
node.nodeId(), Strings.format1Decimals(diskThresholdSettings.getFreeDiskThresholdHigh(), "%"),
|
node.nodeId(), Strings.format1Decimals(diskThresholdSettings.getFreeDiskThresholdHigh(), "%"),
|
||||||
Strings.format1Decimals(freeSpaceAfterShard, "%"));
|
Strings.format1Decimals(freeSpaceAfterShard, "%"));
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"after allocating the shard to this node, it would be above the high watermark " +
|
"allocating the shard to this node will bring the node above the high watermark [%s=%s] " +
|
||||||
"and have more than allowed [%s%%] used disk, free: [%s%%]",
|
"and cause it to use more disk space than the maximum allowed [%s%%] (free space after shard added: [%s%%])",
|
||||||
usedDiskThresholdLow, freeSpaceAfterShard);
|
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeSpaceAfterShard);
|
||||||
}
|
}
|
||||||
|
|
||||||
return allocation.decision(Decision.YES, NAME,
|
return allocation.decision(Decision.YES, NAME,
|
||||||
|
@ -264,9 +279,11 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
|
diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"after allocating this shard this node would be above the high watermark " +
|
"the shard cannot remain on this node because it is above the high watermark [%s=%s] " +
|
||||||
"and there would be less than required [%s] free on node, free: [%s]",
|
"and there is less than the required [%s] free space on node, actual free: [%s]",
|
||||||
diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
|
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getHighWatermarkRaw(),
|
||||||
|
diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
|
||||||
}
|
}
|
||||||
if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdHigh()) {
|
if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdHigh()) {
|
||||||
if (logger.isDebugEnabled()) {
|
if (logger.isDebugEnabled()) {
|
||||||
|
@ -274,9 +291,11 @@ public class DiskThresholdDecider extends AllocationDecider {
|
||||||
diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage, node.nodeId());
|
diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage, node.nodeId());
|
||||||
}
|
}
|
||||||
return allocation.decision(Decision.NO, NAME,
|
return allocation.decision(Decision.NO, NAME,
|
||||||
"after allocating this shard this node would be above the high watermark " +
|
"the shard cannot remain on this node because it is above the high watermark [%s=%s] " +
|
||||||
"and there would be less than required [%s%%] free disk on node, free: [%s%%]",
|
"and there is less than the required [%s%%] free disk on node, actual free: [%s%%]",
|
||||||
diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage);
|
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
|
||||||
|
diskThresholdSettings.getHighWatermarkRaw(),
|
||||||
|
diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage);
|
||||||
}
|
}
|
||||||
|
|
||||||
return allocation.decision(Decision.YES, NAME,
|
return allocation.decision(Decision.YES, NAME,
|
||||||
|
|
|
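For a primary shard that has never been allocated, the byte-based branch above boils down to: allocate when the node is under the low watermark, still allocate when it sits between the two watermarks, refuse once it is over the high watermark. A condensed sketch of that flow with plain long byte counts (thresholds here are the minimum-free-bytes values derived from the watermarks, a simplification of the real ByteSizeValue handling):

```java
// Condensed sketch of the byte-based flow for a primary that has never been allocated;
// thresholds are minimum-free-bytes values derived from the low and high watermarks.
final class DiskDecisionSketch {

    enum Outcome { YES, NO }

    static Outcome canAllocateFreshPrimary(long freeBytes, long freeBytesThresholdLow, long freeBytesThresholdHigh) {
        if (freeBytes >= freeBytesThresholdLow) {
            return Outcome.YES; // under the low watermark: plenty of room
        }
        if (freeBytes > freeBytesThresholdHigh) {
            return Outcome.YES; // between the watermarks, allowed only because the primary was never allocated
        }
        return Outcome.NO;      // over the high watermark: refuse even a fresh primary
    }
}
```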
@ -98,7 +98,8 @@ public class EnableAllocationDecider extends AllocationDecider {
|
||||||
@Override
|
@Override
|
||||||
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||||
if (allocation.ignoreDisable()) {
|
if (allocation.ignoreDisable()) {
|
||||||
return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of allocation");
|
return allocation.decision(Decision.YES, NAME,
|
||||||
|
"explicitly ignoring any disabling of allocation due to manual allocation commands via the reroute API");
|
||||||
}
|
}
|
||||||
|
|
||||||
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
||||||
@@ -64,12 +64,15 @@ public class FilterAllocationDecider extends AllocationDecider {

 public static final String NAME = "filter";

+private static final String CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX = "cluster.routing.allocation.require";
+private static final String CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX = "cluster.routing.allocation.include";
+private static final String CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX = "cluster.routing.allocation.exclude";
 public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =
-Setting.groupSetting("cluster.routing.allocation.require.", Property.Dynamic, Property.NodeScope);
+Setting.groupSetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + ".", Property.Dynamic, Property.NodeScope);
 public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =
-Setting.groupSetting("cluster.routing.allocation.include.", Property.Dynamic, Property.NodeScope);
+Setting.groupSetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.NodeScope);
 public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
-Setting.groupSetting("cluster.routing.allocation.exclude.", Property.Dynamic, Property.NodeScope);
+Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".", Property.Dynamic, Property.NodeScope);

 private volatile DiscoveryNodeFilters clusterRequireFilters;
 private volatile DiscoveryNodeFilters clusterIncludeFilters;
@@ -96,8 +99,10 @@ public class FilterAllocationDecider extends AllocationDecider {
 if (initialRecoveryFilters != null &&
 RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) &&
 initialRecoveryFilters.match(node.node()) == false) {
-return allocation.decision(Decision.NO, NAME, "node does not match index initial recovery filters [%s]",
-indexMd.includeFilters());
+String explanation = (shardRouting.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) ?
+"initial allocation of the shrunken index is only allowed on nodes [%s] that hold a copy of every shard in the index" :
+"initial allocation of the index is only allowed on nodes [%s]";
+return allocation.decision(Decision.NO, NAME, explanation, initialRecoveryFilters);
 }
 }
 return shouldFilter(shardRouting, node, allocation);
@@ -136,17 +141,20 @@ public class FilterAllocationDecider extends AllocationDecider {
 private Decision shouldIndexFilter(IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
 if (indexMd.requireFilters() != null) {
 if (!indexMd.requireFilters().match(node.node())) {
-return allocation.decision(Decision.NO, NAME, "node does not match index required filters [%s]", indexMd.requireFilters());
+return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX, indexMd.requireFilters());
 }
 }
 if (indexMd.includeFilters() != null) {
 if (!indexMd.includeFilters().match(node.node())) {
-return allocation.decision(Decision.NO, NAME, "node does not match index include filters [%s]", indexMd.includeFilters());
+return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_PREFIX, indexMd.includeFilters());
 }
 }
 if (indexMd.excludeFilters() != null) {
 if (indexMd.excludeFilters().match(node.node())) {
-return allocation.decision(Decision.NO, NAME, "node matches index exclude filters [%s]", indexMd.excludeFilters());
+return allocation.decision(Decision.NO, NAME, "node matches [%s] filters [%s]",
+IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey(), indexMd.excludeFilters());
 }
 }
 return null;
@@ -155,17 +163,20 @@ public class FilterAllocationDecider extends AllocationDecider {
 private Decision shouldClusterFilter(RoutingNode node, RoutingAllocation allocation) {
 if (clusterRequireFilters != null) {
 if (!clusterRequireFilters.match(node.node())) {
-return allocation.decision(Decision.NO, NAME, "node does not match global required filters [%s]", clusterRequireFilters);
+return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX, clusterRequireFilters);
 }
 }
 if (clusterIncludeFilters != null) {
 if (!clusterIncludeFilters.match(node.node())) {
-return allocation.decision(Decision.NO, NAME, "node does not match global include filters [%s]", clusterIncludeFilters);
+return allocation.decision(Decision.NO, NAME, "node does not match [%s] filters [%s]",
+CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX, clusterIncludeFilters);
 }
 }
 if (clusterExcludeFilters != null) {
 if (clusterExcludeFilters.match(node.node())) {
-return allocation.decision(Decision.NO, NAME, "node matches global exclude filters [%s]", clusterExcludeFilters);
+return allocation.decision(Decision.NO, NAME, "node matches [%s] filters [%s]",
+CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX, clusterExcludeFilters);
 }
 }
 return null;
@@ -37,8 +37,8 @@ public class RebalanceOnlyWhenActiveAllocationDecider extends AllocationDecider
 @Override
 public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
 if (!allocation.routingNodes().allReplicasActive(shardRouting.shardId(), allocation.metaData())) {
-return allocation.decision(Decision.NO, NAME, "rebalancing can not occur if not all replicas are active in the cluster");
+return allocation.decision(Decision.NO, NAME, "rebalancing is not allowed until all replicas in the cluster are active");
 }
-return allocation.decision(Decision.YES, NAME, "all replicas are active in the cluster, rebalancing can occur");
+return allocation.decision(Decision.YES, NAME, "rebalancing is allowed as all replicas are active in the cluster");
 }
 }
@@ -61,8 +61,15 @@ public class SameShardAllocationDecider extends AllocationDecider {
 Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());
 for (ShardRouting assignedShard : assignedShards) {
 if (node.nodeId().equals(assignedShard.currentNodeId())) {
-return allocation.decision(Decision.NO, NAME,
-"the shard cannot be allocated on the same node id [%s] on which it already exists", node.nodeId());
+if (assignedShard.isSameAllocation(shardRouting)) {
+return allocation.decision(Decision.NO, NAME,
+"the shard cannot be allocated to the node on which it already exists [%s]",
+shardRouting.toString());
+} else {
+return allocation.decision(Decision.NO, NAME,
+"the shard cannot be allocated to the same node on which a copy of the shard [%s] already exists",
+assignedShard.toString());
+}
 }
 }
 if (sameHost) {
@@ -72,27 +79,32 @@ public class SameShardAllocationDecider extends AllocationDecider {
 continue;
 }
 // check if its on the same host as the one we want to allocate to
-boolean checkNodeOnSameHost = false;
+boolean checkNodeOnSameHostName = false;
+boolean checkNodeOnSameHostAddress = false;
 if (Strings.hasLength(checkNode.node().getHostAddress()) && Strings.hasLength(node.node().getHostAddress())) {
 if (checkNode.node().getHostAddress().equals(node.node().getHostAddress())) {
-checkNodeOnSameHost = true;
+checkNodeOnSameHostAddress = true;
 }
 } else if (Strings.hasLength(checkNode.node().getHostName()) && Strings.hasLength(node.node().getHostName())) {
 if (checkNode.node().getHostName().equals(node.node().getHostName())) {
-checkNodeOnSameHost = true;
+checkNodeOnSameHostName = true;
 }
 }
-if (checkNodeOnSameHost) {
+if (checkNodeOnSameHostAddress || checkNodeOnSameHostName) {
 for (ShardRouting assignedShard : assignedShards) {
 if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {
+String hostType = checkNodeOnSameHostAddress ? "address" : "name";
+String host = checkNodeOnSameHostAddress ? node.node().getHostAddress() : node.node().getHostName();
 return allocation.decision(Decision.NO, NAME,
-"shard cannot be allocated on the same host [%s] on which it already exists", node.nodeId());
+"the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; " +
+"set [%s] to false to allow multiple nodes on the same host to hold the same shard copies",
+hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey());
 }
 }
 }
 }
 }
 }
 }
 }
-return allocation.decision(Decision.YES, NAME, "shard is not allocated to same node or host");
+return allocation.decision(Decision.YES, NAME, "the shard does not exist on the same " + (sameHost ? "host" : "node"));
 }
 }
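Note: the same-host check above now records separately whether the match was made on the host address or on the host name, so the explanation can say which one matched. A tiny stand-alone sketch of that address-first, name-fallback comparison; the helper names are hypothetical and this is not the Elasticsearch Strings/DiscoveryNode API.

    class SameHostSketch {
        // Prefer the resolved address when both sides have one, otherwise fall back to the host name,
        // mirroring the address-vs-name distinction introduced in the decider above.
        static boolean onSameHost(String addressA, String nameA, String addressB, String nameB) {
            if (notEmpty(addressA) && notEmpty(addressB)) {
                return addressA.equals(addressB);
            }
            return notEmpty(nameA) && notEmpty(nameB) && nameA.equals(nameB);
        }

        private static boolean notEmpty(String s) {
            return s != null && !s.isEmpty();
        }
    }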
@@ -107,17 +107,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 }
 }
 if (clusterShardLimit > 0 && nodeShardCount >= clusterShardLimit) {
-return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], cluster-level limit per node: [%d]",
-nodeShardCount, clusterShardLimit);
+return allocation.decision(Decision.NO, NAME,
+"too many shards [%d] allocated to this node, [%s=%d]",
+nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit);
 }
 if (indexShardLimit > 0 && indexShardCount >= indexShardLimit) {
 return allocation.decision(Decision.NO, NAME,
-"too many shards for this index [%s] on node [%d], index-level limit per node: [%d]",
-shardRouting.index(), indexShardCount, indexShardLimit);
+"too many shards [%d] allocated to this node for index [%s], [%s=%d]",
+indexShardCount, shardRouting.getIndexName(), INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), indexShardLimit);
 }
 return allocation.decision(Decision.YES, NAME,
-"the shard count is under index limit [%d] and cluster level node limit [%d] of total shards per node",
-indexShardLimit, clusterShardLimit);
+"the shard count [%d] for this node is under the index limit [%d] and cluster level node limit [%d]",
+nodeShardCount, indexShardLimit, clusterShardLimit);
 }

 @Override
@@ -148,17 +149,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 // Subtle difference between the `canAllocate` and `canRemain` is that
 // this checks > while canAllocate checks >=
 if (clusterShardLimit > 0 && nodeShardCount > clusterShardLimit) {
-return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], cluster-level limit per node: [%d]",
-nodeShardCount, clusterShardLimit);
+return allocation.decision(Decision.NO, NAME,
+"too many shards [%d] allocated to this node, [%s=%d]",
+nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit);
 }
 if (indexShardLimit > 0 && indexShardCount > indexShardLimit) {
 return allocation.decision(Decision.NO, NAME,
-"too many shards for this index [%s] on node [%d], index-level limit per node: [%d]",
-shardRouting.index(), indexShardCount, indexShardLimit);
+"too many shards [%d] allocated to this node for index [%s], [%s=%d]",
+indexShardCount, shardRouting.getIndexName(), INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), indexShardLimit);
 }
 return allocation.decision(Decision.YES, NAME,
-"the shard count is under index limit [%d] and cluster level node limit [%d] of total shards per node",
-indexShardLimit, clusterShardLimit);
+"the shard count [%d] for this node is under the index limit [%d] and cluster level node limit [%d]",
+nodeShardCount, indexShardLimit, clusterShardLimit);
 }

 @Override
@@ -182,10 +184,12 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 nodeShardCount++;
 }
 if (clusterShardLimit >= 0 && nodeShardCount >= clusterShardLimit) {
-return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], cluster-level limit per node: [%d]",
-nodeShardCount, clusterShardLimit);
+return allocation.decision(Decision.NO, NAME,
+"too many shards [%d] allocated to this node, [%s=%d]",
+nodeShardCount, CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), clusterShardLimit);
 }
-return allocation.decision(Decision.YES, NAME, "the shard count is under node limit [%d] of total shards per node",
-clusterShardLimit);
+return allocation.decision(Decision.YES, NAME,
+"the shard count [%d] for this node is under the cluster level node limit [%d]",
+nodeShardCount, clusterShardLimit);
 }
 }
@@ -77,15 +77,16 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
 if (shardSnapshotStatus != null && !shardSnapshotStatus.state().completed() && shardSnapshotStatus.nodeId() != null &&
 shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId())) {
 if (logger.isTraceEnabled()) {
-logger.trace("Preventing snapshotted shard [{}] to be moved from node [{}]",
+logger.trace("Preventing snapshotted shard [{}] from being moved away from node [{}]",
 shardRouting.shardId(), shardSnapshotStatus.nodeId());
 }
-return allocation.decision(Decision.NO, NAME, "snapshot for shard [%s] is currently running on node [%s]",
-shardRouting.shardId(), shardSnapshotStatus.nodeId());
+return allocation.decision(Decision.THROTTLE, NAME,
+"waiting for snapshotting of shard [%s] to complete on this node [%s]",
+shardRouting.shardId(), shardSnapshotStatus.nodeId());
 }
 }
 }
-return allocation.decision(Decision.YES, NAME, "the shard is not primary or relocation is disabled");
+return allocation.decision(Decision.YES, NAME, "the shard is not being snapshotted");
 }

 }
@@ -126,8 +126,9 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
 }
 if (primariesInRecovery >= primariesInitialRecoveries) {
 // TODO: Should index creation not be throttled for primary shards?
-return allocation.decision(THROTTLE, NAME, "too many primaries are currently recovering [%d], limit: [%d]",
-primariesInRecovery, primariesInitialRecoveries);
+return allocation.decision(THROTTLE, NAME, "reached the limit of ongoing initial primary recoveries [%d], [%s=%d]",
+primariesInRecovery, CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(),
+primariesInitialRecoveries);
 } else {
 return allocation.decision(YES, NAME, "below primary recovery limit of [%d]", primariesInitialRecoveries);
 }
@@ -138,8 +139,11 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
 // Allocating a shard to this node will increase the incoming recoveries
 int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
 if (currentInRecoveries >= concurrentIncomingRecoveries) {
-return allocation.decision(THROTTLE, NAME, "too many incoming shards are currently recovering [%d], limit: [%d]",
-currentInRecoveries, concurrentIncomingRecoveries);
+return allocation.decision(THROTTLE, NAME,
+"reached the limit of incoming shard recoveries [%d], [%s=%d] (can also be set via [%s])",
+currentInRecoveries, CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(),
+concurrentIncomingRecoveries,
+CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey());
 } else {
 // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node
 ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId());
@@ -148,8 +152,13 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
 }
 int primaryNodeOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId());
 if (primaryNodeOutRecoveries >= concurrentOutgoingRecoveries) {
-return allocation.decision(THROTTLE, NAME, "too many outgoing shards are currently recovering [%d], limit: [%d]",
-primaryNodeOutRecoveries, concurrentOutgoingRecoveries);
+return allocation.decision(THROTTLE, NAME,
+"reached the limit of outgoing shard recoveries [%d] on the node [%s] which holds the primary, " +
+"[%s=%d] (can also be set via [%s])",
+primaryNodeOutRecoveries, node.nodeId(),
+CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(),
+concurrentOutgoingRecoveries,
+CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey());
 } else {
 return allocation.decision(YES, NAME, "below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]",
 primaryNodeOutRecoveries,
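Note: the throttle messages above now name both the specific incoming/outgoing recovery setting and the umbrella CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING key that can set both at once. The underlying decision is simply a counter-versus-limit comparison; a toy sketch under assumed names, outside the Elasticsearch decider API:

    class RecoveryThrottleSketch {
        enum Decision { YES, THROTTLE, NO }

        // Mirrors the shape of the check above: throttle once the node has reached its
        // concurrent incoming-recovery budget, otherwise allow the allocation to proceed.
        static Decision canStartRecovery(int currentIncomingRecoveries, int concurrentIncomingLimit) {
            return currentIncomingRecoveries >= concurrentIncomingLimit ? Decision.THROTTLE : Decision.YES;
        }
    }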
@@ -26,7 +26,6 @@ import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
-import org.elasticsearch.index.mapper.GeoPointFieldMapper;

 import java.io.IOException;

@@ -41,9 +40,9 @@ public class GeoUtils {
 /** Minimum valid longitude in degrees. */
 public static final double MIN_LON = -180.0;

-public static final String LATITUDE = GeoPointFieldMapper.Names.LAT;
-public static final String LONGITUDE = GeoPointFieldMapper.Names.LON;
-public static final String GEOHASH = GeoPointFieldMapper.Names.GEOHASH;
+public static final String LATITUDE = "lat";
+public static final String LONGITUDE = "lon";
+public static final String GEOHASH = "geohash";

 /** Earth ellipsoid major axis defined by WGS 84 in meters */
 public static final double EARTH_SEMI_MAJOR_AXIS = 6378137.0; // meters (WGS 84)
@@ -75,7 +75,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
 }

 @Override
-public void writeBytes(byte[] b, int offset, int length) throws IOException {
+public void writeBytes(byte[] b, int offset, int length) {
 // nothing to copy
 if (length == 0) {
 return;
@@ -73,7 +73,6 @@ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.store.IndicesStore;
-import org.elasticsearch.indices.ttl.IndicesTTLService;
 import org.elasticsearch.monitor.fs.FsService;
 import org.elasticsearch.monitor.jvm.JvmGcMonitorService;
 import org.elasticsearch.monitor.jvm.JvmService;
@@ -185,7 +184,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
 IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
 IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
 IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING,
-IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
 MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
 MetaData.SETTING_READ_ONLY_SETTING,
 RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
@@ -78,7 +78,13 @@ public final class TransportAddress implements Writeable {
 final int len = in.readByte();
 final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
 in.readFully(a);
-InetAddress inetAddress = InetAddress.getByAddress(a);
+final InetAddress inetAddress;
+if (in.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
+String host = in.readString();
+inetAddress = InetAddress.getByAddress(host, a);
+} else {
+inetAddress = InetAddress.getByAddress(a);
+}
 int port = in.readInt();
 this.address = new InetSocketAddress(inetAddress, port);
 }
@@ -91,6 +97,9 @@ public final class TransportAddress implements Writeable {
 byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
 out.writeByte((byte) bytes.length); // 1 byte
 out.write(bytes, 0, bytes.length);
+if (out.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
+out.writeString(address.getHostString());
+}
 // don't serialize scope ids over the network!!!!
 // these only make sense with respect to the local machine, and will only formulate
 // the address incorrectly remotely.
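Note: the TransportAddress change above writes the host string only when the stream version indicates the peer can read it, keeping the wire format compatible with older nodes while preserving the original hostname (and avoiding a reverse lookup) on newer ones. A rough sketch of that version-gating pattern using plain java.io streams; the names and version constant are illustrative, and this is not the Elasticsearch StreamInput/StreamOutput API.

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.net.InetAddress;
    import java.net.InetSocketAddress;

    class VersionGatedAddressSketch {
        static final int VERSION_WITH_HOST_STRING = 50003; // hypothetical stand-in for V_5_0_3

        static void write(DataOutputStream out, int peerVersion, InetSocketAddress address) throws IOException {
            byte[] bytes = address.getAddress().getAddress(); // 4 (IPv4) or 16 (IPv6) bytes
            out.writeByte(bytes.length);
            out.write(bytes);
            if (peerVersion >= VERSION_WITH_HOST_STRING) {
                out.writeUTF(address.getHostString());        // only newer peers expect this field
            }
            out.writeInt(address.getPort());
        }

        static InetSocketAddress read(DataInputStream in, int peerVersion) throws IOException {
            byte[] bytes = new byte[in.readByte()];
            in.readFully(bytes);
            InetAddress inetAddress = peerVersion >= VERSION_WITH_HOST_STRING
                ? InetAddress.getByAddress(in.readUTF(), bytes) // keep the sent hostname, no reverse lookup
                : InetAddress.getByAddress(bytes);
            return new InetSocketAddress(inetAddress, in.readInt());
        }
    }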
@@ -19,14 +19,16 @@

 package org.elasticsearch.discovery.zen;

+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.EmptyTransportResponseHandler;
 import org.elasticsearch.transport.TransportChannel;
@@ -37,6 +39,7 @@ import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;

 public class MembershipAction extends AbstractComponent {

@@ -58,21 +61,20 @@ public class MembershipAction extends AbstractComponent {

 private final TransportService transportService;

-private final DiscoveryNodesProvider nodesProvider;
-
 private final MembershipListener listener;

 public MembershipAction(Settings settings, TransportService transportService,
-DiscoveryNodesProvider nodesProvider, MembershipListener listener) {
+Supplier<DiscoveryNode> localNodeSupplier, MembershipListener listener) {
 super(settings);
 this.transportService = transportService;
-this.nodesProvider = nodesProvider;
 this.listener = listener;

 transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new,
 ThreadPool.Names.GENERIC, new JoinRequestRequestHandler());
-transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, ValidateJoinRequest::new,
-ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler());
+transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
+() -> new ValidateJoinRequest(localNodeSupplier), ThreadPool.Names.GENERIC,
+new ValidateJoinRequestRequestHandler());
 transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new,
 ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
 }
@@ -152,20 +154,23 @@ public class MembershipAction extends AbstractComponent {
 }
 }

-class ValidateJoinRequest extends TransportRequest {
+static class ValidateJoinRequest extends TransportRequest {
+private final Supplier<DiscoveryNode> localNode;
 private ClusterState state;

-ValidateJoinRequest() {
+ValidateJoinRequest(Supplier<DiscoveryNode> localNode) {
+this.localNode = localNode;
 }

 ValidateJoinRequest(ClusterState state) {
 this.state = state;
+this.localNode = state.nodes()::getLocalNode;
 }

 @Override
 public void readFrom(StreamInput in) throws IOException {
 super.readFrom(in);
-this.state = ClusterState.Builder.readFrom(in, nodesProvider.nodes().getLocalNode());
+this.state = ClusterState.Builder.readFrom(in, localNode.get());
 }

 @Override
@@ -175,15 +180,31 @@ public class MembershipAction extends AbstractComponent {
 }
 }

-class ValidateJoinRequestRequestHandler implements TransportRequestHandler<ValidateJoinRequest> {
+static class ValidateJoinRequestRequestHandler implements TransportRequestHandler<ValidateJoinRequest> {

 @Override
 public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception {
+ensureIndexCompatibility(Version.CURRENT.minimumIndexCompatibilityVersion(), request.state.getMetaData());
 // for now, the mere fact that we can serialize the cluster state acts as validation....
 channel.sendResponse(TransportResponse.Empty.INSTANCE);
 }
 }

+/**
+* Ensures that all indices are compatible with the supported index version.
+* @throws IllegalStateException if any index is incompatible with the given version
+*/
+static void ensureIndexCompatibility(final Version supportedIndexVersion, MetaData metaData) {
+// we ensure that all indices in the cluster we join are compatible with us no matter if they are
+// closed or not we can't read mappings of these indices so we need to reject the join...
+for (IndexMetaData idxMetaData : metaData) {
+if (idxMetaData.getCreationVersion().before(supportedIndexVersion)) {
+throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: "
++ idxMetaData.getCreationVersion() + " minimum compatible index version is: " + supportedIndexVersion);
+}
+}
+}
+
 public static class LeaveRequest extends TransportRequest {

 private DiscoveryNode node;
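Note: ensureIndexCompatibility, added above, rejects a join when the cluster holds indices created before the oldest version the joining node can still read. The check itself is only a version comparison over the index metadata; below is a toy, stand-alone sketch using hypothetical Version/IndexMeta records rather than the Elasticsearch types.

    import java.util.List;

    class JoinCompatibilitySketch {
        record Version(int major, int minor, int patch) implements Comparable<Version> {
            public int compareTo(Version o) {
                int c = Integer.compare(major, o.major);
                if (c == 0) c = Integer.compare(minor, o.minor);
                if (c == 0) c = Integer.compare(patch, o.patch);
                return c;
            }
            boolean before(Version o) { return compareTo(o) < 0; }
        }

        record IndexMeta(String name, Version createdWith) {}

        // Throws if any index was created before the minimum version the joining node supports.
        static void ensureIndexCompatibility(Version minimumSupported, List<IndexMeta> indices) {
            for (IndexMeta index : indices) {
                if (index.createdWith().before(minimumSupported)) {
                    throw new IllegalStateException("index " + index.name() + " version not supported: "
                        + index.createdWith() + ", minimum compatible index version is: " + minimumSupported);
                }
            }
        }
    }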
@@ -410,7 +410,6 @@ public class NodeJoinController extends AbstractComponent {
 @Override
 public BatchResult<DiscoveryNode> execute(ClusterState currentState, List<DiscoveryNode> joiningNodes) throws Exception {
 final BatchResult.Builder<DiscoveryNode> results = BatchResult.builder();
-
 final DiscoveryNodes currentNodes = currentState.nodes();
 boolean nodesChanged = false;
 ClusterState.Builder newState;
@@ -435,8 +434,10 @@ public class NodeJoinController extends AbstractComponent {

 assert nodesBuilder.isLocalNodeElectedMaster();

+Version minNodeVersion = Version.CURRENT;
 // processing any joins
 for (final DiscoveryNode node : joiningNodes) {
+minNodeVersion = Version.min(minNodeVersion, node.getVersion());
 if (node.equals(BECOME_MASTER_TASK) || node.equals(FINISH_ELECTION_TASK)) {
 // noop
 } else if (currentNodes.nodeExists(node)) {
@@ -452,7 +453,9 @@ public class NodeJoinController extends AbstractComponent {
 }
 results.success(node);
 }
+// we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices
+// we have to reject nodes that don't support all indices we have in this cluster
+MembershipAction.ensureIndexCompatibility(minNodeVersion.minimumIndexCompatibilityVersion(), currentState.getMetaData());
 if (nodesChanged) {
 newState.nodes(nodesBuilder);
 return results.build(allocationService.reroute(newState.build(), "node_join"));
@@ -64,6 +64,7 @@ import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Queue;
@@ -468,7 +469,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
 // connect to the node, see if we manage to do it, if not, bail
 if (!nodeFoundByAddress) {
 logger.trace("[{}] connecting (light) to {}", sendPingsHandler.id(), finalNodeToSend);
-transportService.connectToNodeLightAndHandshake(finalNodeToSend, timeout.getMillis());
+transportService.connectToNodeAndHandshake(finalNodeToSend, timeout.getMillis());
 } else {
 logger.trace("[{}] connecting to {}", sendPingsHandler.id(), finalNodeToSend);
 transportService.connectToNode(finalNodeToSend);
@@ -584,7 +585,6 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
 List<PingResponse> pingResponses = CollectionUtils.iterableAsArrayList(temporalResponses);
 pingResponses.add(createPingResponse(contextProvider.nodes()));
-

 UnicastPingResponse unicastPingResponse = new UnicastPingResponse();
 unicastPingResponse.id = request.id;
 unicastPingResponse.pingResponses = pingResponses.toArray(new PingResponse[pingResponses.size()]);
@@ -596,8 +596,18 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {

 @Override
 public void messageReceived(UnicastPingRequest request, TransportChannel channel) throws Exception {
-channel.sendResponse(handlePingRequest(request));
+if (request.pingResponse.clusterName().equals(clusterName)) {
+channel.sendResponse(handlePingRequest(request));
+} else {
+throw new IllegalStateException(
+String.format(
+Locale.ROOT,
+"mismatched cluster names; request: [%s], local: [%s]",
+request.pingResponse.clusterName().value(),
+clusterName.value()));
+}
 }

 }

 public static class UnicastPingRequest extends TransportRequest {
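Note: the ping handler above now refuses to answer pings that carry a different cluster name instead of responding to them; the guard is a plain equality check that fails loudly. A minimal sketch of the same guard with hypothetical names, outside the ZenPing transport machinery:

    import java.util.Locale;

    class PingGuardSketch {
        static void checkClusterName(String localClusterName, String requestClusterName) {
            if (!localClusterName.equals(requestClusterName)) {
                throw new IllegalStateException(String.format(Locale.ROOT,
                    "mismatched cluster names; request: [%s], local: [%s]",
                    requestClusterName, localClusterName));
            }
        }

        public static void main(String[] args) {
            checkClusterName("prod-cluster", "prod-cluster"); // ok
            checkClusterName("prod-cluster", "staging");      // throws IllegalStateException
        }
    }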
@@ -43,12 +43,10 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.component.Lifecycle;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.internal.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -185,7 +183,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
 new NewPendingClusterStateListener(),
 discoverySettings,
 clusterService.getClusterName());
-this.membership = new MembershipAction(settings, transportService, this, new MembershipListener());
+this.membership = new MembershipAction(settings, transportService, this::localNode, new MembershipListener());
 this.joinThreadControl = new JoinThreadControl(threadPool);

 transportService.registerRequestHandler(
@@ -303,7 +301,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
 if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) {
 throw new IllegalStateException("Shouldn't publish state when not master");
 }
-
 try {
 publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
 } catch (FailedToCommitClusterStateException t) {
@@ -851,12 +848,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
 }

 void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) {
-if (!transportService.addressSupported(node.getAddress().getClass())) {
-// TODO, what should we do now? Maybe inform that node that its crap?
-logger.warn("received a wrong address type from [{}], ignoring...", node);
-} else if (nodeJoinController == null) {
+if (nodeJoinController == null) {
 throw new IllegalStateException("discovery module is not yet started");
 } else {
+// we do this in a couple of places including the cluster update thread. This one here is really just best effort
+// to ensure we fail as fast as possible.
+MembershipAction.ensureIndexCompatibility(node.getVersion().minimumIndexCompatibilityVersion(), state.getMetaData());
 // try and connect to the node, if it fails, we can raise an exception back to the client...
 transportService.connectToNode(node);

@@ -245,7 +245,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
 boolean changed = false;
 final MetaData.Builder upgradedMetaData = MetaData.builder(metaData);
 for (IndexMetaData indexMetaData : metaData) {
-IndexMetaData newMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
+IndexMetaData newMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData,
+Version.CURRENT.minimumIndexCompatibilityVersion());
 changed |= indexMetaData != newMetaData;
 upgradedMetaData.put(newMetaData, false);
 }
@@ -21,6 +21,7 @@ package org.elasticsearch.gateway;

 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -28,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
@@ -126,10 +128,18 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
 MetaData.Builder metaData = MetaData.builder(currentState.metaData());
 ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
 RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
+final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion()
+.minimumIndexCompatibilityVersion();
 boolean importNeeded = false;
 StringBuilder sb = new StringBuilder();
 for (IndexMetaData indexMetaData : request.indices) {
+if (indexMetaData.getCreationVersion().before(minIndexCompatibilityVersion)) {
+logger.warn("ignoring dangled index [{}] on node [{}]" +
+" since it's created version [{}] is not supported by at least one node in the cluster minVersion [{}]",
+indexMetaData.getIndex(), request.fromNode, indexMetaData.getCreationVersion(),
+minIndexCompatibilityVersion);
+continue;
+}
 if (currentState.metaData().hasIndex(indexMetaData.getIndex().getName())) {
 continue;
 }
@@ -144,7 +154,8 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
 try {
 // The dangled index might be from an older version, we need to make sure it's compatible
 // with the current version and upgrade it if needed.
-upgradedIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
+upgradedIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData,
+minIndexCompatibilityVersion);
 } catch (Exception ex) {
 // upgrade failed - adding index as closed
 logger.warn((Supplier<?>) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex);
@@ -295,9 +295,7 @@ public final class IndexModule {
 NIOFS,
 MMAPFS,
 SIMPLEFS,
-FS,
-@Deprecated
-DEFAULT;
+FS;

 public String getSettingsKey() {
 return this.name().toLowerCase(Locale.ROOT);
@@ -24,7 +24,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -40,7 +39,6 @@ import java.util.Locale;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import java.util.function.Function;
-import java.util.function.Predicate;

 /**
 * This class encapsulates all index level settings and handles settings updates.
@@ -147,7 +145,6 @@ public final class IndexSettings {
 private final boolean queryStringAnalyzeWildcard;
 private final boolean queryStringAllowLeadingWildcard;
 private final boolean defaultAllowUnmappedFields;
-private final Predicate<String> indexNameMatcher;
 private volatile Translog.Durability durability;
 private final TimeValue syncInterval;
 private volatile TimeValue refreshInterval;
@@ -214,7 +211,7 @@ public final class IndexSettings {
 * @param nodeSettings the nodes settings this index is allocated on.
 */
 public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings) {
-this(indexMetaData, nodeSettings, (index) -> Regex.simpleMatch(index, indexMetaData.getIndex().getName()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+this(indexMetaData, nodeSettings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
 }

 /**
@@ -223,9 +220,8 @@ public final class IndexSettings {
 *
 * @param indexMetaData the index metadata this settings object is associated with
 * @param nodeSettings the nodes settings this index is allocated on.
-* @param indexNameMatcher a matcher that can resolve an expression to the index name or index alias
 */
-public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, final Predicate<String> indexNameMatcher, IndexScopedSettings indexScopedSettings) {
+public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, IndexScopedSettings indexScopedSettings) {
 scopedSettings = indexScopedSettings.copy(nodeSettings, indexMetaData);
 this.nodeSettings = nodeSettings;
 this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build();
@@ -243,7 +239,6 @@ public final class IndexSettings {
 this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings);
 this.parseFieldMatcher = new ParseFieldMatcher(settings);
 this.defaultAllowUnmappedFields = scopedSettings.get(ALLOW_UNMAPPED);
-this.indexNameMatcher = indexNameMatcher;
 this.durability = scopedSettings.get(INDEX_TRANSLOG_DURABILITY_SETTING);
 syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings);
 refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING);
@@ -258,7 +253,6 @@ public final class IndexSettings {
 maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD);
 maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL);
 this.mergePolicyConfig = new MergePolicyConfig(logger, this);
-assert indexNameMatcher.test(indexMetaData.getIndex().getName());

 scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio);
 scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed);
@@ -282,7 +276,6 @@ public final class IndexSettings {
 scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
 scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
 scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll);
-
 }

 private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
@@ -400,13 +393,6 @@ public final class IndexSettings {
 */
 public ParseFieldMatcher getParseFieldMatcher() { return parseFieldMatcher; }

-/**
-* Returns <code>true</code> if the given expression matches the index name or one of it's aliases
-*/
-public boolean matchesIndexName(String expression) {
-return indexNameMatcher.test(expression);
-}
-
 /**
 * Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one setting has changed.
 *
|
|
|
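Note (illustration, not part of this diff): with the predicate parameter gone, callers construct IndexSettings from just the index metadata and the node settings. A minimal, hypothetical sketch of such a call site, mirroring the way test helpers usually build the required metadata; the index name and settings values are made up:

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;

// Hypothetical call site: build the minimal IndexMetaData that IndexSettings needs,
// then use the simplified two-argument constructor shown in the hunk above.
public class IndexSettingsUsageSketch {
    public static IndexSettings newIndexSettings(String indexName) {
        IndexMetaData metaData = IndexMetaData.builder(indexName)
                .settings(Settings.builder()
                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
                .build();
        // no index-name matcher is passed any more
        return new IndexSettings(metaData, Settings.EMPTY);
    }
}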
@@ -158,11 +158,12 @@ public final class AnalysisRegistry implements Closeable {
         final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER);
         Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters = new HashMap<>(this.tokenFilters);
         /*
-         * synonym is different than everything else since it needs access to the tokenizer factories for this index.
+         * synonym and synonym_graph are different than everything else since they need access to the tokenizer factories for the index.
          * instead of building the infrastructure for plugins we rather make it a real exception to not pollute the general interface and
          * hide internal data-structures as much as possible.
          */
         tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
+        tokenFilters.put("synonym_graph", requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphFilterFactory(is, env, this, name, settings)));
         return buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
     }
 
@@ -213,12 +214,14 @@ public final class AnalysisRegistry implements Closeable {
         Settings currentSettings = tokenFilterSettings.get(tokenFilter);
         String typeName = currentSettings.get("type");
         /*
-         * synonym is different than everything else since it needs access to the tokenizer factories for this index.
+         * synonym and synonym_graph are different than everything else since they need access to the tokenizer factories for the index.
          * instead of building the infrastructure for plugins we rather make it a real exception to not pollute the general interface and
          * hide internal data-structures as much as possible.
          */
         if ("synonym".equals(typeName)) {
             return requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings));
+        } else if ("synonym_graph".equals(typeName)) {
+            return requriesAnalysisSettings((is, env, name, settings) -> new SynonymGraphFilterFactory(is, env, this, name, settings));
         } else {
             return getAnalysisProvider("tokenfilter", tokenFilters, tokenFilter, typeName);
         }
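Note (illustration, not part of this diff): the hunks above only register the new factory under the "synonym_graph" type name; an index then picks it up through ordinary analysis settings. The filter and analyzer names below are made up, and the synonym entries are examples only:

import org.elasticsearch.common.settings.Settings;

// Hypothetical index-analysis settings that reference the newly registered
// "synonym_graph" token filter type from a custom analyzer.
public class SynonymGraphSettingsSketch {
    public static Settings indexAnalysisSettings() {
        return Settings.builder()
                .put("index.analysis.filter.my_graph_synonyms.type", "synonym_graph")
                .putArray("index.analysis.filter.my_graph_synonyms.synonyms",
                        "laptop, notebook", "tv => television")
                .put("index.analysis.analyzer.my_synonym_analyzer.tokenizer", "standard")
                .putArray("index.analysis.analyzer.my_synonym_analyzer.filter",
                        "lowercase", "my_graph_synonyms")
                .build();
    }
}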
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+
+import java.io.IOException;
+
+public class SynonymGraphFilterFactory extends SynonymTokenFilterFactory {
+    public SynonymGraphFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry,
+                                     String name, Settings settings) throws IOException {
+        super(indexSettings, env, analysisRegistry, name, settings);
+    }
+
+    @Override
+    public TokenStream create(TokenStream tokenStream) {
+        // fst is null means no synonyms
+        return synonymMap.fst == null ? tokenStream : new SynonymGraphFilter(tokenStream, synonymMap, ignoreCase);
+    }
+}
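Note (illustration, not part of this diff): the new factory simply delegates to Lucene's SynonymGraphFilter and skips wrapping when the synonym map is empty. A rough, self-contained sketch of that behaviour with a made-up synonym entry:

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.util.CharsRef;

// Illustration only: build a tiny SynonymMap and wrap a stream the same way the factory does,
// returning the stream unchanged when the map has no entries (fst == null).
public class SynonymGraphSketch {
    public static TokenStream withSynonyms(TokenStream in) throws Exception {
        SynonymMap.Builder builder = new SynonymMap.Builder(true);
        builder.add(new CharsRef("tv"), new CharsRef("television"), true);
        SynonymMap map = builder.build();
        return map.fst == null ? in : new SynonymGraphFilter(in, map, true);
    }
}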
@@ -40,8 +40,8 @@ import java.util.List;
 
 public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
 
-    private final SynonymMap synonymMap;
-    private final boolean ignoreCase;
+    protected final SynonymMap synonymMap;
+    protected final boolean ignoreCase;
 
     public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry,
                                      String name, Settings settings) throws IOException {
@@ -26,7 +26,6 @@ import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.codecs.lucene62.Lucene62Codec;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.mapper.CompletionFieldMapper;
-import org.elasticsearch.index.mapper.CompletionFieldMapper2x;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 
@@ -60,9 +59,6 @@ public class PerFieldMappingPostingFormatCodec extends Lucene62Codec {
             logger.warn("no index mapper found for field: [{}] returning default postings format", field);
         } else if (fieldType instanceof CompletionFieldMapper.CompletionFieldType) {
             return CompletionFieldMapper.CompletionFieldType.postingsFormat();
-        } else if (fieldType instanceof CompletionFieldMapper2x.CompletionFieldType) {
-            return ((CompletionFieldMapper2x.CompletionFieldType) fieldType).postingsFormat(
-                super.getPostingsFormatForField(field));
         }
         return super.getPostingsFormatForField(field);
     }
@@ -69,7 +69,6 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
 
-import javax.net.ssl.SNIServerName;
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -1031,14 +1030,6 @@ public abstract class Engine implements Closeable {
             return this.doc.routing();
         }
 
-        public long timestamp() {
-            return this.doc.timestamp();
-        }
-
-        public long ttl() {
-            return this.doc.ttl();
-        }
-
         public String parent() {
             return this.doc.parent();
         }
@@ -21,7 +21,6 @@ package org.elasticsearch.index.fielddata.plain;
 
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
@@ -52,19 +51,14 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie
      * Lucene 5.4 GeoPointFieldType
      */
     public static class GeoPointDVIndexFieldData extends AbstractGeoPointDVIndexFieldData {
-        final boolean indexCreatedBefore2x;
 
-        public GeoPointDVIndexFieldData(Index index, String fieldName, final boolean indexCreatedBefore2x) {
+        public GeoPointDVIndexFieldData(Index index, String fieldName) {
             super(index, fieldName);
-            this.indexCreatedBefore2x = indexCreatedBefore2x;
         }
 
         @Override
         public AtomicGeoPointFieldData load(LeafReaderContext context) {
             try {
-                if (indexCreatedBefore2x) {
-                    return new GeoPointLegacyDVAtomicFieldData(DocValues.getBinary(context.reader(), fieldName));
-                }
                 return new GeoPointDVAtomicFieldData(DocValues.getSortedNumeric(context.reader(), fieldName));
             } catch (IOException e) {
                 throw new IllegalStateException("Cannot load doc values", e);
@@ -81,13 +75,8 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie
         @Override
         public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
                                        CircuitBreakerService breakerService, MapperService mapperService) {
-            if (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)
-                && fieldType.hasDocValues() == false) {
-                return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), cache, breakerService);
-            }
             // Ignore breaker
-            return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name(),
-                indexSettings.getIndexVersionCreated().before(Version.V_2_2_0));
+            return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name());
         }
     }
 }
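Note (illustration, not part of this diff): with the pre-2.2 branch removed, geo_point field data is always backed by doc values. A hypothetical helper showing how per-document points are read through the MultiGeoPointValues API that appears throughout this diff:

import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;

// Illustration only: copy the geo points stored for one document out of the atomic field data.
public final class GeoPointReadSketch {
    private GeoPointReadSketch() {}

    public static GeoPoint[] pointsForDoc(AtomicGeoPointFieldData fieldData, int docId) {
        MultiGeoPointValues values = fieldData.getGeoPointValues();
        values.setDocument(docId);
        GeoPoint[] points = new GeoPoint[values.count()];
        for (int i = 0; i < points.length; i++) {
            GeoPoint p = values.valueAt(i);
            // valueAt may return a reused, mutable GeoPoint, so take a defensive copy
            points[i] = new GeoPoint(p.lat(), p.lon());
        }
        return points;
    }
}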
@@ -1,145 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.index.fielddata.plain;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.RandomAccessOrds;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.Accountables;
-import org.apache.lucene.util.BitSet;
-import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.util.LongArray;
-import org.elasticsearch.index.fielddata.FieldData;
-import org.elasticsearch.index.fielddata.GeoPointValues;
-import org.elasticsearch.index.fielddata.MultiGeoPointValues;
-import org.elasticsearch.index.fielddata.ordinals.Ordinals;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPointFieldData {
-    @Override
-    public void close() {
-    }
-
-    static class WithOrdinals extends GeoPointArrayAtomicFieldData {
-        private final LongArray indexedPoints;
-        private final Ordinals ordinals;
-        private final int maxDoc;
-
-        public WithOrdinals(LongArray indexedPoints, Ordinals ordinals, int maxDoc) {
-            super();
-            this.indexedPoints = indexedPoints;
-            this.ordinals = ordinals;
-            this.maxDoc = maxDoc;
-        }
-
-        @Override
-        public long ramBytesUsed() {
-            return Integer.BYTES + indexedPoints.ramBytesUsed();
-        }
-
-        @Override
-        public Collection<Accountable> getChildResources() {
-            List<Accountable> resources = new ArrayList<>();
-            resources.add(Accountables.namedAccountable("indexedPoints", indexedPoints));
-            return Collections.unmodifiableList(resources);
-        }
-
-        @Override
-        public MultiGeoPointValues getGeoPointValues() {
-            final RandomAccessOrds ords = ordinals.ordinals();
-            final SortedDocValues singleOrds = DocValues.unwrapSingleton(ords);
-            final GeoPoint point = new GeoPoint(Double.NaN, Double.NaN);
-            if (singleOrds != null) {
-                final GeoPointValues values = new GeoPointValues() {
-                    @Override
-                    public GeoPoint get(int docID) {
-                        final int ord = singleOrds.getOrd(docID);
-                        if (ord >= 0) {
-                            return point.resetFromIndexHash(indexedPoints.get(ord));
-                        }
-                        return point.reset(Double.NaN, Double.NaN);
-                    }
-                };
-                return FieldData.singleton(values, DocValues.docsWithValue(singleOrds, maxDoc));
-            }
-            return new MultiGeoPointValues() {
-                @Override
-                public GeoPoint valueAt(int index) {
-                    return point.resetFromIndexHash(indexedPoints.get(ords.ordAt(index)));
-                }
-
-                @Override
-                public void setDocument(int docId) {
-                    ords.setDocument(docId);
-                }
-
-                @Override
-                public int count() {
-                    return ords.cardinality();
-                }
-            };
-        }
-    }
-
-    public static class Single extends GeoPointArrayAtomicFieldData {
-        private final LongArray indexedPoint;
-        private final BitSet set;
-
-        public Single(LongArray indexedPoint, BitSet set) {
-            this.indexedPoint = indexedPoint;
-            this.set = set;
-        }
-
-        @Override
-        public long ramBytesUsed() {
-            return Integer.BYTES + indexedPoint.ramBytesUsed()
-                + (set == null ? 0 : set.ramBytesUsed());
-        }
-
-        @Override
-        public Collection<Accountable> getChildResources() {
-            List<Accountable> resources = new ArrayList<>();
-            resources.add(Accountables.namedAccountable("indexedPoints", indexedPoint));
-            if (set != null) {
-                resources.add(Accountables.namedAccountable("missing bitset", set));
-            }
-            return Collections.unmodifiableList(resources);
-        }
-
-        @Override
-        public MultiGeoPointValues getGeoPointValues() {
-            final GeoPoint point = new GeoPoint();
-            final GeoPointValues values = new GeoPointValues() {
-                @Override
-                public GeoPoint get(int docID) {
-                    if (set == null || set.get(docID)) {
-                        return point.resetFromIndexHash(indexedPoint.get(docID));
-                    }
-                    return point.reset(Double.NaN, Double.NaN);
-                }
-            };
-            return FieldData.singleton(values, set);
-        }
-    }
-}
@@ -1,180 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.fielddata.plain;
-
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.RandomAccessOrds;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.spatial.geopoint.document.GeoPointField;
-import org.apache.lucene.util.BitSet;
-import org.elasticsearch.Version;
-import org.elasticsearch.common.breaker.CircuitBreaker;
-import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.common.util.DoubleArray;
-import org.elasticsearch.common.util.LongArray;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
-import org.elasticsearch.index.fielddata.FieldData;
-import org.elasticsearch.index.fielddata.IndexFieldData;
-import org.elasticsearch.index.fielddata.IndexFieldDataCache;
-import org.elasticsearch.index.fielddata.ordinals.Ordinals;
-import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.indices.breaker.CircuitBreakerService;
-
-/**
- * Loads FieldData for an array of GeoPoints supporting both long encoded points and backward compatible double arrays
- */
-public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData {
-    private final CircuitBreakerService breakerService;
-
-    public GeoPointArrayIndexFieldData(IndexSettings indexSettings, String fieldName,
-                                       IndexFieldDataCache cache, CircuitBreakerService breakerService) {
-        super(indexSettings, fieldName, cache);
-        this.breakerService = breakerService;
-    }
-
-    @Override
-    public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
-        LeafReader reader = context.reader();
-
-        Terms terms = reader.terms(getFieldName());
-        AtomicGeoPointFieldData data = null;
-        // TODO: Use an actual estimator to estimate before loading.
-        NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
-        if (terms == null) {
-            data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
-            estimator.afterLoad(null, data.ramBytesUsed());
-            return data;
-        }
-        return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)) ?
-            loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
-    }
-
-    /**
-     * long encoded geopoint field data
-     */
-    private AtomicGeoPointFieldData loadFieldData22(LeafReader reader, NonEstimatingEstimator estimator, Terms terms,
-                                                    AtomicGeoPointFieldData data) throws Exception {
-        LongArray indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(128);
-        final float acceptableTransientOverheadRatio = OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO;
-        boolean success = false;
-        try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
-            final TermsEnum termsEnum;
-            final GeoPointField.TermEncoding termEncoding;
-            if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_3_0)) {
-                termEncoding = GeoPointField.TermEncoding.PREFIX;
-                termsEnum = OrdinalsBuilder.wrapGeoPointTerms(terms.iterator());
-            } else {
-                termEncoding = GeoPointField.TermEncoding.NUMERIC;
-                termsEnum = OrdinalsBuilder.wrapNumeric64Bit(terms.iterator());
-            }
-
-            final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(termsEnum), termEncoding);
-
-            Long hashedPoint;
-            long numTerms = 0;
-            while ((hashedPoint = iter.next()) != null) {
-                indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.resize(indexedPoints, numTerms + 1);
-                indexedPoints.set(numTerms++, hashedPoint);
-            }
-            indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.resize(indexedPoints, numTerms);
-
-            Ordinals build = builder.build();
-            RandomAccessOrds ordinals = build.ordinals();
-            if (FieldData.isMultiValued(ordinals) == false) {
-                int maxDoc = reader.maxDoc();
-                LongArray sIndexedPoint = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(reader.maxDoc());
-                for (int i=0; i<maxDoc; ++i) {
-                    ordinals.setDocument(i);
-                    long nativeOrdinal = ordinals.nextOrd();
-                    if (nativeOrdinal != RandomAccessOrds.NO_MORE_ORDS) {
-                        sIndexedPoint.set(i, indexedPoints.get(nativeOrdinal));
-                    }
-                }
-                BitSet set = builder.buildDocsWithValuesSet();
-                data = new GeoPointArrayAtomicFieldData.Single(sIndexedPoint, set);
-            } else {
-                data = new GeoPointArrayAtomicFieldData.WithOrdinals(indexedPoints, build, reader.maxDoc());
-            }
-            success = true;
-            return data;
-        } finally {
-            if (success) {
-                estimator.afterLoad(null, data.ramBytesUsed());
-            }
-        }
-    }
-
-    /**
-     * Backward compatibility support for legacy lat/lon double arrays
-     */
-    private AtomicGeoPointFieldData loadLegacyFieldData(LeafReader reader, NonEstimatingEstimator estimator, Terms terms,
-                                                        AtomicGeoPointFieldData data) throws Exception {
-        DoubleArray lat = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(128);
-        DoubleArray lon = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(128);
-        final float acceptableTransientOverheadRatio = OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO;
-        boolean success = false;
-        try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
-            final GeoPointTermsEnumLegacy iter = new GeoPointTermsEnumLegacy(builder.buildFromTerms(terms.iterator()));
-            GeoPoint point;
-            long numTerms = 0;
-            while ((point = iter.next()) != null) {
-                lat = BigArrays.NON_RECYCLING_INSTANCE.resize(lat, numTerms + 1);
-                lon = BigArrays.NON_RECYCLING_INSTANCE.resize(lon, numTerms + 1);
-                lat.set(numTerms, point.getLat());
-                lon.set(numTerms, point.getLon());
-                ++numTerms;
-            }
-            lat = BigArrays.NON_RECYCLING_INSTANCE.resize(lat, numTerms);
-            lon = BigArrays.NON_RECYCLING_INSTANCE.resize(lon, numTerms);
-
-            Ordinals build = builder.build();
-            RandomAccessOrds ordinals = build.ordinals();
-            if (FieldData.isMultiValued(ordinals) == false) {
-                int maxDoc = reader.maxDoc();
-                DoubleArray sLat = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(reader.maxDoc());
-                DoubleArray sLon = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(reader.maxDoc());
-                for (int i = 0; i < maxDoc; i++) {
-                    ordinals.setDocument(i);
-                    long nativeOrdinal = ordinals.nextOrd();
-                    if (nativeOrdinal != RandomAccessOrds.NO_MORE_ORDS) {
-                        sLat.set(i, lat.get(nativeOrdinal));
-                        sLon.set(i, lon.get(nativeOrdinal));
-                    }
-                }
-                BitSet set = builder.buildDocsWithValuesSet();
-                data = new GeoPointArrayLegacyAtomicFieldData.Single(sLon, sLat, set);
-            } else {
-                data = new GeoPointArrayLegacyAtomicFieldData.WithOrdinals(lon, lat, build, reader.maxDoc());
-            }
-            success = true;
-            return data;
-        } finally {
-            if (success) {
-                estimator.afterLoad(null, data.ramBytesUsed());
-            }
-        }
-    }
-}
@@ -1,162 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.index.fielddata.plain;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.RandomAccessOrds;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.Accountables;
-import org.apache.lucene.util.BitSet;
-import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.util.DoubleArray;
-import org.elasticsearch.index.fielddata.FieldData;
-import org.elasticsearch.index.fielddata.GeoPointValues;
-import org.elasticsearch.index.fielddata.MultiGeoPointValues;
-import org.elasticsearch.index.fielddata.ordinals.Ordinals;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicGeoPointFieldData {
-
-    @Override
-    public void close() {
-    }
-
-    static class WithOrdinals extends GeoPointArrayLegacyAtomicFieldData {
-
-        private final DoubleArray lon, lat;
-        private final Ordinals ordinals;
-        private final int maxDoc;
-
-        public WithOrdinals(DoubleArray lon, DoubleArray lat, Ordinals ordinals, int maxDoc) {
-            super();
-            this.lon = lon;
-            this.lat = lat;
-            this.ordinals = ordinals;
-            this.maxDoc = maxDoc;
-        }
-
-        @Override
-        public long ramBytesUsed() {
-            return Integer.BYTES/*size*/ + lon.ramBytesUsed() + lat.ramBytesUsed();
-        }
-
-        @Override
-        public Collection<Accountable> getChildResources() {
-            List<Accountable> resources = new ArrayList<>();
-            resources.add(Accountables.namedAccountable("latitude", lat));
-            resources.add(Accountables.namedAccountable("longitude", lon));
-            return Collections.unmodifiableList(resources);
-        }
-
-        @Override
-        public MultiGeoPointValues getGeoPointValues() {
-            final RandomAccessOrds ords = ordinals.ordinals();
-            final SortedDocValues singleOrds = DocValues.unwrapSingleton(ords);
-            if (singleOrds != null) {
-                final GeoPoint point = new GeoPoint();
-                final GeoPointValues values = new GeoPointValues() {
-                    @Override
-                    public GeoPoint get(int docID) {
-                        final int ord = singleOrds.getOrd(docID);
-                        if (ord >= 0) {
-                            return point.reset(lat.get(ord), lon.get(ord));
-                        }
-                        return point.reset(Double.NaN, Double.NaN);
-                    }
-                };
-                return FieldData.singleton(values, DocValues.docsWithValue(singleOrds, maxDoc));
-            } else {
-                final GeoPoint point = new GeoPoint();
-                return new MultiGeoPointValues() {
-
-                    @Override
-                    public GeoPoint valueAt(int index) {
-                        final long ord = ords.ordAt(index);
-                        if (ord >= 0) {
-                            return point.reset(lat.get(ord), lon.get(ord));
-                        }
-                        return point.reset(Double.NaN, Double.NaN);
-                    }
-
-                    @Override
-                    public void setDocument(int docId) {
-                        ords.setDocument(docId);
-                    }
-
-                    @Override
-                    public int count() {
-                        return ords.cardinality();
-                    }
-                };
-            }
-        }
-    }
-
-    /**
-     * Assumes unset values are marked in bitset, and docId is used as the index to the value array.
-     */
-    public static class Single extends GeoPointArrayLegacyAtomicFieldData {
-
-        private final DoubleArray lon, lat;
-        private final BitSet set;
-
-        public Single(DoubleArray lon, DoubleArray lat, BitSet set) {
-            this.lon = lon;
-            this.lat = lat;
-            this.set = set;
-        }
-
-        @Override
-        public long ramBytesUsed() {
-            return Integer.BYTES + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed());
-        }
-
-        @Override
-        public Collection<Accountable> getChildResources() {
-            List<Accountable> resources = new ArrayList<>();
-            resources.add(Accountables.namedAccountable("latitude", lat));
-            resources.add(Accountables.namedAccountable("longitude", lon));
-            if (set != null) {
-                resources.add(Accountables.namedAccountable("missing bitset", set));
-            }
-            return Collections.unmodifiableList(resources);
-        }
-
-        @Override
-        public MultiGeoPointValues getGeoPointValues() {
-            final GeoPoint point = new GeoPoint();
-            final GeoPointValues values = new GeoPointValues() {
-                @Override
-                public GeoPoint get(int docID) {
-                    if (set == null || set.get(docID)) {
-                        return point.reset(lat.get(docID), lon.get(docID));
-                    }
-                    return point.reset(Double.NaN, Double.NaN);
-                }
-            };
-            return FieldData.singleton(values, set);
-        }
-    }
-
-}
@@ -1,100 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.fielddata.plain;
-
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.util.ByteUtils;
-import org.elasticsearch.index.fielddata.MultiGeoPointValues;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-
-final class GeoPointLegacyDVAtomicFieldData extends AbstractAtomicGeoPointFieldData {
-
-    private static final int COORDINATE_SIZE = 8; // number of bytes per coordinate
-    private static final int GEOPOINT_SIZE = COORDINATE_SIZE * 2; // lat + lon
-
-    private final BinaryDocValues values;
-
-    GeoPointLegacyDVAtomicFieldData(BinaryDocValues values) {
-        super();
-        this.values = values;
-    }
-
-    @Override
-    public long ramBytesUsed() {
-        return 0; // not exposed by Lucene
-    }
-
-    @Override
-    public Collection<Accountable> getChildResources() {
-        return Collections.emptyList();
-    }
-
-    @Override
-    public void close() {
-        // no-op
-    }
-
-    @Override
-    public MultiGeoPointValues getGeoPointValues() {
-        return new MultiGeoPointValues() {
-
-            int count;
-            GeoPoint[] points = new GeoPoint[0];
-
-            @Override
-            public void setDocument(int docId) {
-                final BytesRef bytes = values.get(docId);
-                assert bytes.length % GEOPOINT_SIZE == 0;
-                count = (bytes.length >>> 4);
-                if (count > points.length) {
-                    final int previousLength = points.length;
-                    points = Arrays.copyOf(points, ArrayUtil.oversize(count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
-                    for (int i = previousLength; i < points.length; ++i) {
-                        points[i] = new GeoPoint(Double.NaN, Double.NaN);
-                    }
-                }
-                for (int i = 0; i < count; ++i) {
-                    final double lat = ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i * GEOPOINT_SIZE);
-                    final double lon = ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i * GEOPOINT_SIZE + COORDINATE_SIZE);
-                    points[i].reset(lat, lon);
-                }
-            }
-
-            @Override
-            public int count() {
-                return count;
-            }
-
-            @Override
-            public GeoPoint valueAt(int index) {
-                return points[index];
-            }
-
-        };
-    }
-}
Some files were not shown because too many files have changed in this diff.