Merge branch 'master' into feature/rank-eval
Commit 619e4c1a44
@@ -28,6 +28,7 @@ import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.ModuleDependency
import org.gradle.api.artifacts.ModuleVersionIdentifier
import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler

@@ -294,12 +295,15 @@ class BuildPlugin implements Plugin<Project> {
* Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms.
*
* <ul>
* <li>Remove transitive dependencies (using wildcard exclusions, fixed in gradle 2.14)</li>
* <li>Set compile time deps back to compile from runtime (known issue with maven-publish plugin)
* <li>Remove transitive dependencies. We currently exclude all artifacts explicitly instead of using wildcards
* as Ivy incorrectly translates POMs with * excludes to Ivy XML with * excludes which results in the main artifact
* being excluded as well (see https://issues.apache.org/jira/browse/IVY-1531). Note that Gradle 2.14+ automatically
* translates non-transitive dependencies to * excludes. We should revisit this when upgrading Gradle.</li>
* <li>Set compile time deps back to compile from runtime (known issue with maven-publish plugin)</li>
* </ul>
*/
private static Closure fixupDependencies(Project project) {
// TODO: remove this when enforcing gradle 2.14+, it now properly handles exclusions
// TODO: revisit this when upgrading to Gradle 2.14+, see Javadoc comment above
return { XmlProvider xml ->
// first find if we have dependencies at all, and grab the node
NodeList depsNodes = xml.asNode().get('dependencies')

@@ -334,10 +338,19 @@ class BuildPlugin implements Plugin<Project> {
continue
}

// we now know we have something to exclude, so add a wildcard exclusion element
Node exclusion = depNode.appendNode('exclusions').appendNode('exclusion')
exclusion.appendNode('groupId', '*')
exclusion.appendNode('artifactId', '*')
// we now know we have something to exclude, so add exclusions for all artifacts except the main one
Node exclusions = depNode.appendNode('exclusions')
for (ResolvedArtifact artifact : artifacts) {
ModuleVersionIdentifier moduleVersionIdentifier = artifact.moduleVersion.id;
String depGroupId = moduleVersionIdentifier.group
String depArtifactId = moduleVersionIdentifier.name
// add exclusions for all artifacts except the main one
if (depGroupId != groupId || depArtifactId != artifactId) {
Node exclusion = exclusions.appendNode('exclusion')
exclusion.appendNode('groupId', depGroupId)
exclusion.appendNode('artifactId', depArtifactId)
}
}
}
}
}

@@ -38,7 +38,7 @@ public class DocsTestPlugin extends RestTestPlugin {
* the last released version for docs. */
'\\{version\\}':
VersionProperties.elasticsearch.replace('-SNAPSHOT', ''),
'\\{lucene_version\\}' : VersionProperties.lucene,
'\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''),
]
Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
listSnippets.group 'Docs'

@@ -62,6 +62,15 @@ class ClusterConfiguration {
@Input
boolean debug = false

/**
* if <code>true</code> each node will be configured with <tt>discovery.zen.minimum_master_nodes</tt> set
* to the total number of nodes in the cluster. This will also cause that each node has `0s` state recovery
* timeout which can lead to issues if for instance an existing clusterstate is expected to be recovered
* before any tests start
*/
@Input
boolean useMinimumMasterNodes = true

@Input
String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +

@@ -95,11 +104,11 @@ class ClusterConfiguration {
@Input
Closure waitCondition = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}")
ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow")
// checking here for wait_for_nodes to be >= the number of nodes because its possible
// this cluster is attempting to connect to nodes created by another task (same cluster name),
// so there will be more nodes in that case in the cluster state
ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}",
ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
dest: tmpFile.toString(),
ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
retries: 10)

@@ -73,8 +73,8 @@ class ClusterFormationTasks {
}
// this is our current version distribution configuration we use for all kinds of REST tests etc.
String distroConfigName = "${task.name}_elasticsearchDistro"
Configuration distro = project.configurations.create(distroConfigName)
configureDistributionDependency(project, config.distribution, distro, VersionProperties.elasticsearch)
Configuration currentDistro = project.configurations.create(distroConfigName)
configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch)
if (config.bwcVersion != null && config.numBwcNodes > 0) {
// if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version
// this version uses the same distribution etc. and only differs in the version we depend on.

@@ -85,11 +85,11 @@ class ClusterFormationTasks {
}
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
}

for (int i = 0; i < config.numNodes; ++i) {
for (int i = 0; i < config.numNodes; i++) {
// we start N nodes and out of these N nodes there might be M bwc nodes.
// for each of those nodes we might have a different configuration
String elasticsearchVersion = VersionProperties.elasticsearch
Configuration distro = currentDistro
if (i < config.numBwcNodes) {
elasticsearchVersion = config.bwcVersion
distro = project.configurations.elasticsearchBwcDistro

@@ -252,9 +252,17 @@ class ClusterFormationTasks {
'path.repo' : "${node.sharedDir}/repo",
'path.shared_data' : "${node.sharedDir}/",
// Define a node attribute so we can test that it exists
'node.attr.testattr' : 'test',
'node.attr.testattr' : 'test',
'repositories.url.allowed_urls': 'http://snapshot.test*'
]
// we set min master nodes to the total number of nodes in the cluster and
// basically skip initial state recovery to allow the cluster to form using a realistic master election
// this means all nodes must be up, join the seed node and do a master election. This will also allow new and
// old nodes in the BWC case to become the master
if (node.config.useMinimumMasterNodes && node.config.numNodes > 1) {
esConfig['discovery.zen.minimum_master_nodes'] = node.config.numNodes
esConfig['discovery.initial_state_timeout'] = '0s' // don't wait for state.. just start up quickly
}
esConfig['node.max_local_storage_nodes'] = node.config.numNodes
esConfig['http.port'] = node.config.httpPort
esConfig['transport.tcp.port'] = node.config.transportPort

@@ -55,7 +55,9 @@ public class RestIntegTestTask extends RandomizedTestingTask {
parallelism = '1'
include('**/*IT.class')
systemProperty('tests.rest.load_packaged', 'false')
systemProperty('tests.rest.cluster', "${-> nodes[0].httpUri()}")
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass

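The new comment lines above explain that every node's HTTP address is now passed to tests.rest.cluster so test clients can round-robin between nodes instead of talking to a single node. As an illustration only (this helper is hypothetical, not part of the diff), a test could consume that comma-separated property with the low-level REST client like this:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class RestTestClientExample {
    // Hypothetical helper: parse the comma-separated "host:port" list that the build
    // now passes via -Dtests.rest.cluster and build a client that rotates requests
    // across all of the test cluster's nodes.
    static RestClient fromSystemProperty() {
        String cluster = System.getProperty("tests.rest.cluster"); // e.g. "127.0.0.1:9200,127.0.0.1:9201"
        String[] addresses = cluster.split(",");
        HttpHost[] hosts = new HttpHost[addresses.length];
        for (int i = 0; i < addresses.length; i++) {
            String[] hostPort = addresses[i].split(":");
            hosts[i] = new HttpHost(hostPort[0], Integer.parseInt(hostPort[1]));
        }
        return RestClient.builder(hosts).build();
    }
}
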
@@ -459,7 +459,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]RestController.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestCountAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestIndicesAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestNodesAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistry.java" checks="LineLength" />

@@ -1,5 +1,5 @@
elasticsearch = 6.0.0-alpha1
lucene = 6.2.0
lucene = 6.3.0-snapshot-a66a445

# optional dependencies
spatial4j = 0.6

@@ -11,7 +11,7 @@ slf4j = 1.6.2
jna = 4.2.2

# test dependencies
randomizedrunner = 2.3.2
randomizedrunner = 2.4.0
junit = 4.11
httpclient = 4.5.2
httpcore = 4.4.5

@@ -46,7 +46,7 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons
//default buffer limit is 10MB
public static final int DEFAULT_BUFFER_LIMIT = 10 * 1024 * 1024;

private final int bufferLimit;
private final int bufferLimitBytes;
private volatile HttpResponse response;
private volatile SimpleInputBuffer buf;

@@ -54,7 +54,7 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons
* Creates a new instance of this consumer with a buffer limit of {@link #DEFAULT_BUFFER_LIMIT}
*/
public HeapBufferedAsyncResponseConsumer() {
this.bufferLimit = DEFAULT_BUFFER_LIMIT;
this.bufferLimitBytes = DEFAULT_BUFFER_LIMIT;
}

/**

@@ -64,7 +64,14 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons
if (bufferLimit <= 0) {
throw new IllegalArgumentException("bufferLimit must be greater than 0");
}
this.bufferLimit = bufferLimit;
this.bufferLimitBytes = bufferLimit;
}

/**
* Get the limit of the buffer.
*/
public int getBufferLimit() {
return bufferLimitBytes;
}

@Override

@@ -75,9 +82,9 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons
@Override
protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException {
long len = entity.getContentLength();
if (len > bufferLimit) {
if (len > bufferLimitBytes) {
throw new ContentTooLongException("entity content is too long [" + len +
"] for the configured buffer limit [" + bufferLimit + "]");
"] for the configured buffer limit [" + bufferLimitBytes + "]");
}
if (len < 0) {
len = 4096;

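The rename from bufferLimit to bufferLimitBytes and the new getBufferLimit() getter are visible in the hunks above. A minimal usage sketch, assuming the class lives in org.elasticsearch.client alongside the other client code shown in this commit:

import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer;

public class BufferLimitExample {
    public static void main(String[] args) {
        // Default limit (DEFAULT_BUFFER_LIMIT, 10 MB) versus an explicit limit in bytes.
        HeapBufferedAsyncResponseConsumer defaultConsumer = new HeapBufferedAsyncResponseConsumer();
        HeapBufferedAsyncResponseConsumer smallConsumer = new HeapBufferedAsyncResponseConsumer(1024 * 1024);
        // The getter added in this diff reports the configured limit in bytes.
        System.out.println(defaultConsumer.getBufferLimit()); // 10485760
        System.out.println(smallConsumer.getBufferLimit());   // 1048576
        // A response whose Content-Length exceeds the limit is rejected with ContentTooLongException.
    }
}
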
@@ -510,6 +510,7 @@ public class RestClient implements Closeable {

private static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
Objects.requireNonNull(params, "params must not be null");
Objects.requireNonNull(path, "path must not be null");
try {
String fullPath;
if (pathPrefix != null) {

@@ -19,7 +19,7 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;

@@ -62,7 +62,7 @@ public class RequestLoggerTests extends RestClientTestCase {
}

HttpRequestBase request;
int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);
int requestType = RandomNumbers.randomIntBetween(getRandom(), 0, 7);
switch(requestType) {
case 0:
request = new HttpGetWithEntity(uri);

@@ -99,7 +99,7 @@ public class RequestLoggerTests extends RestClientTestCase {
expected += " -d '" + requestBody + "'";
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
HttpEntity entity;
switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) {
switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) {
case 0:
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
break;

@@ -128,12 +128,12 @@ public class RequestLoggerTests extends RestClientTestCase {

public void testTraceResponse() throws IOException {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
int statusCode = RandomInts.randomIntBetween(getRandom(), 200, 599);
int statusCode = RandomNumbers.randomIntBetween(getRandom(), 200, 599);
String reasonPhrase = "REASON";
BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase);
String expected = "# " + statusLine.toString();
BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
int numHeaders = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
for (int i = 0; i < numHeaders; i++) {
httpResponse.setHeader("header" + i, "value");
expected += "\n# header" + i + ": value";

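This hunk and the test hunks that follow swap RandomInts.randomIntBetween for RandomNumbers.randomIntBetween, matching the randomizedrunner bump from 2.3.2 to 2.4.0 earlier in this commit; the call signature is unchanged. A minimal sketch of the new call:

import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import java.util.Random;

public class RandomNumbersExample {
    public static void main(String[] args) {
        Random random = new Random();
        // Same signature as the old RandomInts.randomIntBetween: both bounds are inclusive.
        int statusCode = RandomNumbers.randomIntBetween(random, 200, 599);
        System.out.println(statusCode);
    }
}
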
@@ -229,6 +229,17 @@ public class RestClientIntegTests extends RestClientTestCase {
}
}

public void testPath() throws IOException {
for (String method : getHttpMethods()) {
try {
restClient.performRequest(method, null);
fail("path set to null should fail!");
} catch (NullPointerException e) {
assertEquals("path must not be null", e.getMessage());
}
}
}

private void bodyTest(String method) throws IOException {
String requestBody = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(requestBody);

@@ -19,7 +19,7 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;

@@ -95,7 +95,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
return null;
}
});
int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5);
int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
httpHosts = new HttpHost[numHosts];
for (int i = 0; i < numHosts; i++) {
httpHosts[i] = new HttpHost("localhost", 9200 + i);

@@ -105,7 +105,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
}

public void testRoundRobinOkStatusCodes() throws IOException {
int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
for (int i = 0; i < numIters; i++) {
Set<HttpHost> hostsSet = new HashSet<>();
Collections.addAll(hostsSet, httpHosts);

@@ -121,7 +121,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
}

public void testRoundRobinNoRetryErrors() throws IOException {
int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
for (int i = 0; i < numIters; i++) {
Set<HttpHost> hostsSet = new HashSet<>();
Collections.addAll(hostsSet, httpHosts);

@@ -198,7 +198,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
}

int numIters = RandomInts.randomIntBetween(getRandom(), 2, 5);
int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
for (int i = 1; i <= numIters; i++) {
//check that one different host is resurrected at each new attempt
Set<HttpHost> hostsSet = new HashSet<>();

@@ -228,7 +228,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
if (getRandom().nextBoolean()) {
//mark one host back alive through a successful request and check that all requests after that are sent to it
HttpHost selectedHost = null;
int iters = RandomInts.randomIntBetween(getRandom(), 2, 10);
int iters = RandomNumbers.randomIntBetween(getRandom(), 2, 10);
for (int y = 0; y < iters; y++) {
int statusCode = randomErrorNoRetryStatusCode(getRandom());
Response response;

@@ -269,7 +269,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
}

private static String randomErrorRetryEndpoint() {
switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) {
switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) {
case 0:
return "/" + randomErrorRetryStatusCode(getRandom());
case 1:

@@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import com.fasterxml.jackson.core.JsonFactory;

@@ -69,7 +69,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {

@Before
public void startHttpServer() throws IOException {
this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000);
this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000);
this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values());
if (rarely()) {
this.sniffResponse = SniffResponse.buildFailure();

@@ -101,7 +101,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
assertEquals(e.getMessage(), "scheme cannot be null");
}
try {
new ElasticsearchHostsSniffer(restClient, RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
ElasticsearchHostsSniffer.Scheme.HTTP);
fail("should have failed");
} catch (IllegalArgumentException e) {

@@ -175,7 +175,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}

private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException {
int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
List<HttpHost> hosts = new ArrayList<>(numNodes);
JsonFactory jsonFactory = new JsonFactory();
StringWriter writer = new StringWriter();

@@ -205,7 +205,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
boolean isHttpEnabled = rarely() == false;
if (isHttpEnabled) {
String host = "host" + i;
int port = RandomInts.randomIntBetween(getRandom(), 9200, 9299);
int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299);
HttpHost httpHost = new HttpHost(host, port, scheme.toString());
hosts.add(httpHost);
generator.writeObjectFieldStart("http");

@@ -228,7 +228,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}
if (getRandom().nextBoolean()) {
String[] roles = {"master", "data", "ingest"};
int numRoles = RandomInts.randomIntBetween(getRandom(), 0, 3);
int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
Set<String> nodeRoles = new HashSet<>(numRoles);
for (int j = 0; j < numRoles; j++) {
String role;

@@ -242,7 +242,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}
generator.writeEndArray();
}
int numAttributes = RandomInts.randomIntBetween(getRandom(), 0, 3);
int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
Map<String, String> attributes = new HashMap<>(numAttributes);
for (int j = 0; j < numAttributes; j++) {
attributes.put("attr" + j, "value" + j);

@@ -291,6 +291,6 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}

private static int randomErrorResponseCode() {
return RandomInts.randomIntBetween(getRandom(), 400, 599);
return RandomNumbers.randomIntBetween(getRandom(), 400, 599);
}
}

@@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;

@@ -31,7 +31,7 @@ import static org.junit.Assert.fail;
public class SnifferBuilderTests extends RestClientTestCase {

public void testBuild() throws Exception {
int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
HttpHost[] hosts = new HttpHost[numNodes];
for (int i = 0; i < numNodes; i++) {
hosts[i] = new HttpHost("localhost", 9200 + i);

@@ -46,14 +46,14 @@ public class SnifferBuilderTests extends RestClientTestCase {
}

try {
Sniffer.builder(client).setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
Sniffer.builder(client).setSniffIntervalMillis(RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage());
}

try {
Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage());

@@ -74,10 +74,10 @@ public class SnifferBuilderTests extends RestClientTestCase {

SnifferBuilder builder = Sniffer.builder(client);
if (getRandom().nextBoolean()) {
builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
builder.setSniffIntervalMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
}
if (getRandom().nextBoolean()) {
builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
}
if (getRandom().nextBoolean()) {
builder.setHostsSniffer(new MockHostsSniffer());

@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.analyzing.AnalyzingQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;

@@ -34,6 +35,7 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.common.lucene.search.Queries;

@@ -42,6 +44,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.LegacyDateFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;

@@ -63,7 +66,7 @@ import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfN
* Also breaks fields with [type].[name] into a boolean query that must include the type
* as well as the query on the name.
*/
public class MapperQueryParser extends QueryParser {
public class MapperQueryParser extends AnalyzingQueryParser {

public static final Map<String, FieldQueryExtension> FIELD_QUERY_EXTENSIONS;

@@ -99,11 +102,11 @@ public class MapperQueryParser extends QueryParser {
setAutoGeneratePhraseQueries(settings.autoGeneratePhraseQueries());
setMaxDeterminizedStates(settings.maxDeterminizedStates());
setAllowLeadingWildcard(settings.allowLeadingWildcard());
setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
setLowercaseExpandedTerms(false);
setPhraseSlop(settings.phraseSlop());
setDefaultOperator(settings.defaultOperator());
setFuzzyPrefixLength(settings.fuzzyPrefixLength());
setLocale(settings.locale());
setSplitOnWhitespace(settings.splitOnWhitespace());
}

/**

@@ -329,21 +332,20 @@ public class MapperQueryParser extends QueryParser {
boolean startInclusive, boolean endInclusive, QueryShardContext context) {
currentFieldType = context.fieldMapper(field);
if (currentFieldType != null) {
if (lowercaseExpandedTerms && currentFieldType.tokenized()) {
part1 = part1 == null ? null : part1.toLowerCase(locale);
part2 = part2 == null ? null : part2.toLowerCase(locale);
}

try {
BytesRef part1Binary = part1 == null ? null : getAnalyzer().normalize(field, part1);
BytesRef part2Binary = part2 == null ? null : getAnalyzer().normalize(field, part2);
Query rangeQuery;
if (currentFieldType instanceof LegacyDateFieldMapper.DateFieldType && settings.timeZone() != null) {
LegacyDateFieldMapper.DateFieldType dateFieldType = (LegacyDateFieldMapper.DateFieldType) this.currentFieldType;
rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null, context);
rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary,
startInclusive, endInclusive, settings.timeZone(), null, context);
} else if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) {
DateFieldMapper.DateFieldType dateFieldType = (DateFieldMapper.DateFieldType) this.currentFieldType;
rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null, context);
rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary,
startInclusive, endInclusive, settings.timeZone(), null, context);
} else {
rangeQuery = currentFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, context);
rangeQuery = currentFieldType.rangeQuery(part1Binary, part2Binary, startInclusive, endInclusive, context);
}
return rangeQuery;
} catch (RuntimeException e) {

@@ -357,9 +359,6 @@ public class MapperQueryParser extends QueryParser {
}

protected Query getFuzzyQuery(String field, String termStr, String minSimilarity) throws ParseException {
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {

@@ -398,8 +397,9 @@ public class MapperQueryParser extends QueryParser {
currentFieldType = context.fieldMapper(field);
if (currentFieldType != null) {
try {
return currentFieldType.fuzzyQuery(termStr, Fuzziness.build(minSimilarity),
fuzzyPrefixLength, settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions);
BytesRef term = termStr == null ? null : getAnalyzer().normalize(field, termStr);
return currentFieldType.fuzzyQuery(term, Fuzziness.build(minSimilarity),
getFuzzyPrefixLength(), settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;

@@ -422,9 +422,6 @@ public class MapperQueryParser extends QueryParser {

@Override
protected Query getPrefixQuery(String field, String termStr) throws ParseException {
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {

@@ -470,8 +467,8 @@ public class MapperQueryParser extends QueryParser {
setAnalyzer(context.getSearchAnalyzer(currentFieldType));
}
Query query = null;
if (currentFieldType.tokenized() == false) {
query = currentFieldType.prefixQuery(termStr, multiTermRewriteMethod, context);
if (currentFieldType instanceof StringFieldType == false) {
query = currentFieldType.prefixQuery(termStr, getMultiTermRewriteMethod(), context);
}
if (query == null) {
query = getPossiblyAnalyzedPrefixQuery(currentFieldType.name(), termStr);

@@ -589,9 +586,6 @@ public class MapperQueryParser extends QueryParser {
return FIELD_QUERY_EXTENSIONS.get(ExistsFieldQueryExtension.NAME).query(context, actualField);
}
}
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {

@@ -638,9 +632,8 @@ public class MapperQueryParser extends QueryParser {
setAnalyzer(context.getSearchAnalyzer(currentFieldType));
}
indexedNameField = currentFieldType.name();
return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
}
return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
return super.getWildcardQuery(indexedNameField, termStr);
} catch (RuntimeException e) {
if (settings.lenient()) {
return null;

@@ -651,75 +644,8 @@ public class MapperQueryParser extends QueryParser {
}
}

private Query getPossiblyAnalyzedWildcardQuery(String field, String termStr) throws ParseException {
if (!settings.analyzeWildcard()) {
return super.getWildcardQuery(field, termStr);
}
boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*"));
StringBuilder aggStr = new StringBuilder();
StringBuilder tmp = new StringBuilder();
for (int i = 0; i < termStr.length(); i++) {
char c = termStr.charAt(i);
if (c == '?' || c == '*') {
if (isWithinToken) {
try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) {
source.reset();
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
if (source.incrementToken()) {
String term = termAtt.toString();
if (term.length() == 0) {
// no tokens, just use what we have now
aggStr.append(tmp);
} else {
aggStr.append(term);
}
} else {
// no tokens, just use what we have now
aggStr.append(tmp);
}
} catch (IOException e) {
aggStr.append(tmp);
}
tmp.setLength(0);
}
isWithinToken = false;
aggStr.append(c);
} else {
tmp.append(c);
isWithinToken = true;
}
}
if (isWithinToken) {
try {
try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) {
source.reset();
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
if (source.incrementToken()) {
String term = termAtt.toString();
if (term.length() == 0) {
// no tokens, just use what we have now
aggStr.append(tmp);
} else {
aggStr.append(term);
}
} else {
// no tokens, just use what we have now
aggStr.append(tmp);
}
}
} catch (IOException e) {
aggStr.append(tmp);
}
}

return super.getWildcardQuery(field, aggStr.toString());
}

@Override
protected Query getRegexpQuery(String field, String termStr) throws ParseException {
if (lowercaseExpandedTerms) {
termStr = termStr.toLowerCase(locale);
}
Collection<String> fields = extractMultiFields(field);
if (fields != null) {
if (fields.size() == 1) {

@@ -767,7 +693,7 @@ public class MapperQueryParser extends QueryParser {
Query query = null;
if (currentFieldType.tokenized() == false) {
query = currentFieldType.regexpQuery(termStr, RegExp.ALL,
maxDeterminizedStates, multiTermRewriteMethod, context);
getMaxDeterminizedStates(), getMultiTermRewriteMethod(), context);
}
if (query == null) {
query = super.getRegexpQuery(field, termStr);

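The parser hunks above drop the manual lowercaseExpandedTerms/toLowerCase(locale) handling and instead call getAnalyzer().normalize(field, text) before building range and fuzzy queries. A small sketch of what Analyzer.normalize does, assuming StandardAnalyzer's normalization chain lowercases the input (which is the behaviour the parser now relies on):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.BytesRef;

public class NormalizeExample {
    public static void main(String[] args) {
        StandardAnalyzer analyzer = new StandardAnalyzer();
        // normalize() applies the analyzer's normalization filters (e.g. lowercasing)
        // without tokenizing, instead of the parser lowercasing strings itself.
        BytesRef normalized = analyzer.normalize("field", "FooBAR");
        System.out.println(normalized.utf8ToString()); // expected: "foobar"
        analyzer.close();
    }
}
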
@@ -24,7 +24,6 @@ import org.apache.lucene.search.MultiTermQuery;
import org.elasticsearch.common.unit.Fuzziness;
import org.joda.time.DateTimeZone;

import java.util.Locale;
import java.util.Map;

/**

@@ -53,12 +52,8 @@ public class QueryParserSettings {

private boolean analyzeWildcard;

private boolean lowercaseExpandedTerms;

private boolean enablePositionIncrements;

private Locale locale;

private Fuzziness fuzziness;
private int fuzzyPrefixLength;
private int fuzzyMaxExpansions;

@@ -79,6 +74,8 @@ public class QueryParserSettings {
/** To limit effort spent determinizing regexp queries. */
private int maxDeterminizedStates;

private boolean splitOnWhitespace;

public QueryParserSettings(String queryString) {
this.queryString = queryString;
}

@@ -135,14 +132,6 @@ public class QueryParserSettings {
this.allowLeadingWildcard = allowLeadingWildcard;
}

public boolean lowercaseExpandedTerms() {
return lowercaseExpandedTerms;
}

public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
}

public boolean enablePositionIncrements() {
return enablePositionIncrements;
}

@@ -267,14 +256,6 @@ public class QueryParserSettings {
this.useDisMax = useDisMax;
}

public void locale(Locale locale) {
this.locale = locale;
}

public Locale locale() {
return this.locale;
}

public void timeZone(DateTimeZone timeZone) {
this.timeZone = timeZone;
}

@@ -290,4 +271,12 @@ public class QueryParserSettings {
public Fuzziness fuzziness() {
return fuzziness;
}

public void splitOnWhitespace(boolean value) {
this.splitOnWhitespace = value;
}

public boolean splitOnWhitespace() {
return splitOnWhitespace;
}
}

@@ -19,7 +19,6 @@

package org.elasticsearch;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.io.stream.StreamInput;

@@ -488,7 +487,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,
org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),// deprecated in 6.0, remove in 7.0
DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,
org.elasticsearch.index.engine.DocumentMissingException::new, 29),
SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,

@@ -582,7 +581,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,
org.elasticsearch.action.RoutingMissingException::new, 79),
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,
org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), // deprecated in 6.0, remove in 7.0
INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,
org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,

@@ -89,8 +89,10 @@ public class Version {
public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_5_0_0_rc1_ID = 5000051;
public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_5_0_0_ID = 5000099;
public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_6_0_0_alpha1_ID = 6000001;
public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final Version CURRENT = V_6_0_0_alpha1;

/* NOTE: don't add unreleased version to this list except of the version assigned to CURRENT.

@@ -115,6 +117,8 @@ public class Version {
switch (id) {
case V_6_0_0_alpha1_ID:
return V_6_0_0_alpha1;
case V_5_0_0_ID:
return V_5_0_0;
case V_5_0_0_rc1_ID:
return V_5_0_0_rc1;
case V_5_0_0_beta1_ID:

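From the constants in this hunk (5000099 for 5.0.0, 5000051 for 5.0.0-rc1, 6000001 for 6.0.0-alpha1) the version id appears to encode two digits each for major, minor, and revision plus a two-digit build suffix; a small sketch decoding an id under that assumption:

public class VersionIdExample {
    public static void main(String[] args) {
        // Assumed layout, inferred only from the constants shown above:
        // MMmmrrbb, where bb is 99 for a GA release, 51 for rc1, 01 for alpha1.
        int id = 5000099; // V_5_0_0_ID from the diff
        int major = id / 1000000;
        int minor = (id / 10000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        System.out.println(major + "." + minor + "." + revision + " build " + build); // 5.0.0 build 99
    }
}
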
@@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;
import java.util.ArrayList;

@@ -59,7 +58,8 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera
return pendingTasks.iterator();
}

public String prettyPrint() {
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("tasks: (").append(pendingTasks.size()).append("):\n");
for (PendingClusterTask pendingClusterTask : this) {

@@ -68,19 +68,6 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera
return sb.toString();
}

@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(Fields.TASKS);

@@ -21,19 +21,15 @@ package org.elasticsearch.action.bulk;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;

@@ -51,10 +47,12 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequestOptions;

@@ -62,11 +60,15 @@ import org.elasticsearch.transport.TransportService;

import java.util.Map;

import static org.elasticsearch.action.delete.TransportDeleteAction.executeDeleteRequestOnPrimary;
import static org.elasticsearch.action.delete.TransportDeleteAction.executeDeleteRequestOnReplica;
import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary;
import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnReplica;
import static org.elasticsearch.action.support.replication.ReplicationOperation.ignoreReplicaException;
import static org.elasticsearch.action.support.replication.ReplicationOperation.isConflictException;

/** Performs shard-level bulk (index, delete or update) operations */
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardResponse> {
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {

public static final String ACTION_NAME = BulkAction.NAME + "[s]";

@@ -80,7 +82,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
indexNameExpressionResolver, BulkShardRequest::new, ThreadPool.Names.BULK);
indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK);
this.updateHelper = updateHelper;
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
this.mappingUpdatedAction = mappingUpdatedAction;

@@ -102,7 +104,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
}

@Override
protected WriteResult<BulkShardResponse> onPrimaryShard(BulkShardRequest request, IndexShard primary) throws Exception {
protected WritePrimaryResult shardOperationOnPrimary(BulkShardRequest request, IndexShard primary) throws Exception {
final IndexMetaData metaData = primary.indexSettings().getIndexMetaData();

long[] preVersions = new long[request.items().length];

@@ -118,30 +120,86 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
responses[i] = items[i].getPrimaryResponse();
}
BulkShardResponse response = new BulkShardResponse(request.shardId(), responses);
return new WriteResult<>(response, location);
return new WritePrimaryResult(request, response, location, null, primary);
}

/** Executes bulk item requests and handles request execution exceptions */
private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard indexShard,
private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary,
BulkShardRequest request,
long[] preVersions, VersionType[] preVersionTypes,
Translog.Location location, int requestIndex) {
preVersions[requestIndex] = request.items()[requestIndex].request().version();
preVersionTypes[requestIndex] = request.items()[requestIndex].request().versionType();
DocWriteRequest.OpType opType = request.items()[requestIndex].request().opType();
Translog.Location location, int requestIndex) throws Exception {
final DocWriteRequest itemRequest = request.items()[requestIndex].request();
preVersions[requestIndex] = itemRequest.version();
preVersionTypes[requestIndex] = itemRequest.versionType();
DocWriteRequest.OpType opType = itemRequest.opType();
try {
WriteResult<? extends DocWriteResponse> writeResult = innerExecuteBulkItemRequest(metaData, indexShard,
request, requestIndex);
if (writeResult.getLocation() != null) {
location = locationToSync(location, writeResult.getLocation());
} else {
assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP
: "only noop operation can have null next operation";
// execute item request
final Engine.Result operationResult;
final DocWriteResponse response;
final BulkItemRequest replicaRequest;
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
final IndexRequest indexRequest = (IndexRequest) itemRequest;
Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
operationResult = indexResult;
response = indexResult.hasFailure() ? null
: new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(),
indexResult.getVersion(), indexResult.isCreated());
replicaRequest = request.items()[requestIndex];
break;
case UPDATE:
UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest),
primary, metaData, request, requestIndex);
operationResult = updateResultHolder.operationResult;
response = updateResultHolder.response;
replicaRequest = updateResultHolder.replicaRequest;
break;
case DELETE:
final DeleteRequest deleteRequest = (DeleteRequest) itemRequest;
Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary);
operationResult = deleteResult;
response = deleteResult.hasFailure() ? null :
new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(),
deleteResult.getVersion(), deleteResult.isFound());
replicaRequest = request.items()[requestIndex];
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
// update the bulk item request because update request execution can mutate the bulk item request
BulkItemRequest item = request.items()[requestIndex];
// add the response
setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse()));
request.items()[requestIndex] = replicaRequest;
if (operationResult == null) { // in case of noop update operation
assert response.getResult() == DocWriteResponse.Result.NOOP
: "only noop update can have null operation";
replicaRequest.setIgnoreOnReplica();
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response));
} else if (operationResult.hasFailure() == false) {
location = locationToSync(location, operationResult.getTranslogLocation());
BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response);
replicaRequest.setPrimaryResponse(primaryResponse);
// set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
primaryResponse.getResponse().setShardInfo(new ShardInfo());
} else {
DocWriteRequest docWriteRequest = replicaRequest.request();
Exception failure = operationResult.getFailure();
if (isConflictException(failure)) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), failure);
}
// if its a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) {
replicaRequest.setIgnoreOnReplica();
replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)));
}
}
assert replicaRequest.getPrimaryResponse() != null;
assert preVersionTypes[requestIndex] != null;
} catch (Exception e) {
// rethrow the failure if we are going to retry on primary and let parent failure to handle it
if (retryPrimaryException(e)) {

@@ -151,147 +209,165 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
docWriteRequest.version(preVersions[j]);
docWriteRequest.versionType(preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
BulkItemRequest item = request.items()[requestIndex];
DocWriteRequest docWriteRequest = item.request();
if (isConflictException(e)) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), e);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}",
request.shardId(), docWriteRequest.opType().getLowercase(), request), e);
}
// if its a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), docWriteRequest.opType(),
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), e)));
}
throw e;
}
assert request.items()[requestIndex].getPrimaryResponse() != null;
assert preVersionTypes[requestIndex] != null;
return location;
}

private WriteResult<? extends DocWriteResponse> innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard,
BulkShardRequest request, int requestIndex) throws Exception {
DocWriteRequest itemRequest = request.items()[requestIndex].request();
switch (itemRequest.opType()) {
case CREATE:
case INDEX:
return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction);
case UPDATE:
int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict();
for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) {
try {
return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest));
} catch (Exception e) {
final Throwable cause = ExceptionsHelper.unwrapCause(e);
if (attemptCount == maxAttempts // bubble up exception when we run out of attempts
|| (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict
throw e;
}
}
}
throw new IllegalStateException("version conflict exception should bubble up on last attempt");
case DELETE:
return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard);
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
}
private static class UpdateResultHolder {
final BulkItemRequest replicaRequest;
final Engine.Result operationResult;
final DocWriteResponse response;

private void setResponse(BulkItemRequest request, BulkItemResponse response) {
request.setPrimaryResponse(response);
if (response.isFailed()) {
request.setIgnoreOnReplica();
} else {
// Set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
response.getResponse().setShardInfo(new ShardInfo());
private UpdateResultHolder(BulkItemRequest replicaRequest, Engine.Result operationResult,
DocWriteResponse response) {
this.replicaRequest = replicaRequest;
this.operationResult = operationResult;
this.response = response;
}
}

/**
* Executes update request, doing a get and translating update to a index or delete operation
* NOTE: all operations except NOOP, reassigns the bulk item request
*/
private WriteResult<? extends DocWriteResponse> shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard,
BulkShardRequest request,
int requestIndex, UpdateRequest updateRequest)
throws Exception {
// Todo: capture read version conflicts, missing documents and malformed script errors in the write result due to get request
UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard, threadPool::estimatedTimeInMillis);
switch (translate.getResponseResult()) {
case CREATED:
case UPDATED:
IndexRequest indexRequest = translate.action();
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, allowIdGeneration, request.index());
WriteResult<IndexResponse> writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction);
BytesReference indexSourceAsBytes = indexRequest.source();
IndexResponse indexResponse = writeResult.getResponse();
UpdateResponse update = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
update.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
* Executes update request, delegating to a index or delete operation after translation,
* handles retries on version conflict and constructs update response
* NOTE: reassigns bulk item request at <code>requestIndex</code> for replicas to
* execute translated update request (NOOP update is an exception). NOOP updates are
* indicated by returning a <code>null</code> operation in {@link UpdateResultHolder}
* */
private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary,
|
||||
IndexMetaData metaData, BulkShardRequest request,
|
||||
int requestIndex) throws Exception {
|
||||
Engine.Result updateOperationResult = null;
|
||||
UpdateResponse updateResponse = null;
|
||||
BulkItemRequest replicaRequest = request.items()[requestIndex];
|
||||
int maxAttempts = updateRequest.retryOnConflict();
|
||||
for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) {
|
||||
final UpdateHelper.Result translate;
|
||||
// translate update request
|
||||
try {
|
||||
translate = updateHelper.prepare(updateRequest, primary, threadPool::estimatedTimeInMillis);
|
||||
} catch (Exception failure) {
|
||||
// we may fail translating a update to index or delete operation
|
||||
// we use index result to communicate failure while translating update request
|
||||
updateOperationResult = new Engine.IndexResult(failure, updateRequest.version());
|
||||
break; // out of retry loop
|
||||
}
|
||||
// execute translated update request
|
||||
switch (translate.getResponseResult()) {
|
||||
case CREATED:
|
||||
case UPDATED:
|
||||
IndexRequest indexRequest = translate.action();
|
||||
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
|
||||
indexRequest.process(mappingMd, allowIdGeneration, request.index());
|
||||
updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
|
||||
break;
|
||||
case DELETED:
|
||||
updateOperationResult = executeDeleteRequestOnPrimary(translate.action(), primary);
|
||||
break;
|
||||
case NOOP:
|
||||
primary.noopUpdate(updateRequest.type());
|
||||
break;
|
||||
default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult());
|
||||
}
|
||||
if (updateOperationResult == null) {
|
||||
// this is a noop operation
|
||||
updateResponse = translate.action();
|
||||
break; // out of retry loop
|
||||
} else if (updateOperationResult.hasFailure() == false) {
|
||||
// enrich update response and
|
||||
// set translated update (index/delete) request for replica execution in bulk items
|
||||
switch (updateOperationResult.getOperationType()) {
|
||||
case INDEX:
|
||||
IndexRequest updateIndexRequest = translate.action();
|
||||
final IndexResponse indexResponse = new IndexResponse(primary.shardId(),
|
||||
updateIndexRequest.type(), updateIndexRequest.id(),
|
||||
updateOperationResult.getVersion(), ((Engine.IndexResult) updateOperationResult).isCreated());
|
||||
BytesReference indexSourceAsBytes = updateIndexRequest.source();
|
||||
updateResponse = new UpdateResponse(indexResponse.getShardInfo(),
|
||||
indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(),
|
||||
indexResponse.getVersion(), indexResponse.getResult());
|
||||
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
|
||||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
|
||||
Tuple<XContentType, Map<String, Object>> sourceAndContent =
|
||||
XContentHelper.convertToMap(indexSourceAsBytes, true);
|
||||
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(),
|
||||
indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
|
||||
}
|
||||
// set translated request as replica request
|
||||
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateIndexRequest);
|
||||
break;
|
||||
case DELETE:
|
||||
DeleteRequest updateDeleteRequest = translate.action();
|
||||
DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(),
|
||||
updateDeleteRequest.type(), updateDeleteRequest.id(),
|
||||
updateOperationResult.getVersion(), ((Engine.DeleteResult) updateOperationResult).isFound());
|
||||
updateResponse = new UpdateResponse(deleteResponse.getShardInfo(),
|
||||
deleteResponse.getShardId(), deleteResponse.getType(), deleteResponse.getId(),
|
||||
deleteResponse.getVersion(), deleteResponse.getResult());
|
||||
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest,
|
||||
request.index(), deleteResponse.getVersion(), translate.updatedSourceAsMap(),
|
||||
translate.updateSourceContentType(), null));
|
||||
// set translated request as replica request
|
||||
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest);
|
||||
break;
|
||||
}
|
||||
// Replace the update request to the translated index request to execute on the replica.
|
||||
request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
|
||||
return new WriteResult<>(update, writeResult.getLocation());
|
||||
case DELETED:
|
||||
DeleteRequest deleteRequest = translate.action();
|
||||
WriteResult<DeleteResponse> deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
|
||||
DeleteResponse response = deleteResult.getResponse();
|
||||
UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
|
||||
deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null));
|
||||
// Replace the update request to the translated delete request to execute on the replica.
|
||||
request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
|
||||
return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation());
|
||||
case NOOP:
|
||||
BulkItemRequest item = request.items()[requestIndex];
|
||||
indexShard.noopUpdate(updateRequest.type());
|
||||
item.setIgnoreOnReplica(); // no need to go to the replica
|
||||
return new WriteResult<>(translate.action(), null);
|
||||
default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult());
|
||||
// successful operation
|
||||
break; // out of retry loop
|
||||
} else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) {
|
||||
// not a version conflict exception
|
||||
break; // out of retry loop
|
||||
}
|
||||
}
|
||||
return new UpdateResultHolder(replicaRequest, updateOperationResult, updateResponse);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Location onReplicaShard(BulkShardRequest request, IndexShard indexShard) {
|
||||
protected WriteReplicaResult shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
|
||||
Translog.Location location = null;
|
||||
for (int i = 0; i < request.items().length; i++) {
|
||||
BulkItemRequest item = request.items()[i];
|
||||
if (item == null || item.isIgnoreOnReplica()) {
|
||||
continue;
|
||||
}
|
||||
DocWriteRequest docWriteRequest = item.request();
|
||||
final Engine.Operation operation;
|
||||
try {
|
||||
switch (docWriteRequest.opType()) {
|
||||
case CREATE:
|
||||
case INDEX:
|
||||
operation = TransportIndexAction.executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), indexShard);
|
||||
break;
|
||||
case DELETE:
|
||||
operation = TransportDeleteAction.executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), indexShard);
|
||||
break;
|
||||
default: throw new IllegalStateException("Unexpected request operation type on replica: "
|
||||
+ docWriteRequest.opType().getLowercase());
|
||||
}
|
||||
location = locationToSync(location, operation.getTranslogLocation());
|
||||
} catch (Exception e) {
|
||||
// if its not an ignore replica failure, we need to make sure to bubble up the failure
|
||||
// so we will fail the shard
|
||||
if (!ignoreReplicaException(e)) {
|
||||
throw e;
|
||||
if (item.isIgnoreOnReplica() == false) {
|
||||
DocWriteRequest docWriteRequest = item.request();
|
||||
final Engine.Result operationResult;
|
||||
try {
|
||||
switch (docWriteRequest.opType()) {
|
||||
case CREATE:
|
||||
case INDEX:
|
||||
operationResult = executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), replica);
|
||||
break;
|
||||
case DELETE:
|
||||
operationResult = executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), replica);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("Unexpected request operation type on replica: "
|
||||
+ docWriteRequest.opType().getLowercase());
|
||||
}
|
||||
if (operationResult.hasFailure()) {
|
||||
// check if any transient write operation failures should be bubbled up
|
||||
Exception failure = operationResult.getFailure();
|
||||
assert failure instanceof VersionConflictEngineException
|
||||
|| failure instanceof MapperParsingException
|
||||
|| failure instanceof EngineClosedException
|
||||
|| failure instanceof IndexShardClosedException
|
||||
: "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" +
|
||||
" failures. got " + failure;
|
||||
if (!ignoreReplicaException(failure)) {
|
||||
throw failure;
|
||||
}
|
||||
} else {
|
||||
location = locationToSync(location, operationResult.getTranslogLocation());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// if its not an ignore replica failure, we need to make sure to bubble up the failure
|
||||
// so we will fail the shard
|
||||
if (!ignoreReplicaException(e)) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return location;
|
||||
return new WriteReplicaResult(request, location, null, replica);
|
||||
}
|
||||
|
||||
private Translog.Location locationToSync(Translog.Location current, Translog.Location next) {
|
||||
|
|
|
@@ -39,7 +39,6 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.translog.Translog.Location;
|
||||
import org.elasticsearch.indices.IndexAlreadyExistsException;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
|
@@ -49,7 +48,7 @@ import org.elasticsearch.transport.TransportService;
|
|||
/**
|
||||
* Performs the delete operation.
|
||||
*/
|
||||
public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, DeleteResponse> {
|
||||
public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, DeleteRequest,DeleteResponse> {
|
||||
|
||||
private final AutoCreateIndex autoCreateIndex;
|
||||
private final TransportCreateIndexAction createIndexAction;
|
||||
|
@@ -61,7 +60,7 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
|
|||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex) {
|
||||
super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
|
||||
indexNameExpressionResolver, DeleteRequest::new, ThreadPool.Names.INDEX);
|
||||
indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX);
|
||||
this.createIndexAction = createIndexAction;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
}
|
||||
|
@@ -70,7 +69,11 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
|
|||
protected void doExecute(Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
|
||||
ClusterState state = clusterService.state();
|
||||
if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
|
||||
createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest()
|
||||
.index(request.index())
|
||||
.cause("auto(delete api)")
|
||||
.masterNodeTimeout(request.timeout());
|
||||
createIndexAction.execute(task, createIndexRequest, new ActionListener<CreateIndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(CreateIndexResponse result) {
|
||||
innerExecute(task, request, listener);
|
||||
|
@@ -119,30 +122,33 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
|
|||
}
|
||||
|
||||
@Override
|
||||
protected WriteResult<DeleteResponse> onPrimaryShard(DeleteRequest request, IndexShard indexShard) {
|
||||
return executeDeleteRequestOnPrimary(request, indexShard);
|
||||
protected WritePrimaryResult shardOperationOnPrimary(DeleteRequest request, IndexShard primary) throws Exception {
|
||||
final Engine.DeleteResult result = executeDeleteRequestOnPrimary(request, primary);
|
||||
final DeleteResponse response = result.hasFailure() ? null :
|
||||
new DeleteResponse(primary.shardId(), request.type(), request.id(), result.getVersion(), result.isFound());
|
||||
return new WritePrimaryResult(request, response, result.getTranslogLocation(), result.getFailure(), primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) {
|
||||
return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation();
|
||||
protected WriteReplicaResult shardOperationOnReplica(DeleteRequest request, IndexShard replica) throws Exception {
|
||||
final Engine.DeleteResult result = executeDeleteRequestOnReplica(request, replica);
|
||||
return new WriteReplicaResult(request, result.getTranslogLocation(), result.getFailure(), replica);
|
||||
}
|
||||
|
||||
public static WriteResult<DeleteResponse> executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) {
|
||||
Engine.Delete delete = indexShard.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType());
|
||||
indexShard.delete(delete);
|
||||
// update the request with the version so it will go to the replicas
|
||||
request.versionType(delete.versionType().versionTypeForReplicationAndRecovery());
|
||||
request.version(delete.version());
|
||||
|
||||
assert request.versionType().validateVersionForWrites(request.version());
|
||||
DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found());
|
||||
return new WriteResult<>(response, delete.getTranslogLocation());
|
||||
public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) {
|
||||
Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType());
|
||||
Engine.DeleteResult result = primary.delete(delete);
|
||||
if (result.hasFailure() == false) {
|
||||
// update the request with the version so it will go to the replicas
|
||||
request.versionType(delete.versionType().versionTypeForReplicationAndRecovery());
|
||||
request.version(result.getVersion());
|
||||
assert request.versionType().validateVersionForWrites(request.version());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) {
|
||||
Engine.Delete delete = indexShard.prepareDeleteOnReplica(request.type(), request.id(), request.version(), request.versionType());
|
||||
indexShard.delete(delete);
|
||||
return delete;
|
||||
public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) {
|
||||
Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(), request.version(), request.versionType());
|
||||
return replica.delete(delete);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -101,6 +101,9 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
validationException = ValidateActions.addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]",
validationException);
}
if (versionType == VersionType.FORCE) {
validationException = ValidateActions.addValidationError("version type [force] may no longer be used", validationException);
}
return validationException;
}
|
|
|
@@ -39,11 +39,11 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.Mapping;
|
||||
import org.elasticsearch.index.mapper.SourceToParse;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.translog.Translog.Location;
|
||||
import org.elasticsearch.indices.IndexAlreadyExistsException;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
|
@@ -60,7 +60,7 @@ import org.elasticsearch.transport.TransportService;
|
|||
* <li><b>allowIdGeneration</b>: If the id is set not, should it be generated. Defaults to <tt>true</tt>.
|
||||
* </ul>
|
||||
*/
|
||||
public class TransportIndexAction extends TransportWriteAction<IndexRequest, IndexResponse> {
|
||||
public class TransportIndexAction extends TransportWriteAction<IndexRequest, IndexRequest, IndexResponse> {
|
||||
|
||||
private final AutoCreateIndex autoCreateIndex;
|
||||
private final boolean allowIdGeneration;
|
||||
|
@@ -76,7 +76,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
|
|||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex) {
|
||||
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
|
||||
actionFilters, indexNameExpressionResolver, IndexRequest::new, ThreadPool.Names.INDEX);
|
||||
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
|
||||
this.mappingUpdatedAction = mappingUpdatedAction;
|
||||
this.createIndexAction = createIndexAction;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
|
@@ -140,65 +140,88 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
|
|||
}
|
||||
|
||||
@Override
|
||||
protected WriteResult<IndexResponse> onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception {
|
||||
return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction);
|
||||
protected WritePrimaryResult shardOperationOnPrimary(IndexRequest request, IndexShard primary) throws Exception {
|
||||
final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, mappingUpdatedAction);
|
||||
final IndexResponse response = indexResult.hasFailure() ? null :
|
||||
new IndexResponse(primary.shardId(), request.type(), request.id(), indexResult.getVersion(),
|
||||
indexResult.isCreated());
|
||||
return new WritePrimaryResult(request, response, indexResult.getTranslogLocation(), indexResult.getFailure(), primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) {
|
||||
return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation();
|
||||
protected WriteReplicaResult shardOperationOnReplica(IndexRequest request, IndexShard replica) throws Exception {
|
||||
final Engine.IndexResult indexResult = executeIndexRequestOnReplica(request, replica);
|
||||
return new WriteReplicaResult(request, indexResult.getTranslogLocation(), indexResult.getFailure(), replica);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the given {@link IndexRequest} on a replica shard, throwing a
|
||||
* {@link RetryOnReplicaException} if the operation needs to be re-tried.
|
||||
*/
|
||||
public static Engine.Index executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) {
|
||||
final ShardId shardId = indexShard.shardId();
|
||||
public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) {
|
||||
final ShardId shardId = replica.shardId();
|
||||
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source())
|
||||
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
|
||||
|
||||
final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
final Engine.Index operation;
|
||||
try {
|
||||
operation = replica.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
} catch (MapperParsingException e) {
|
||||
return new Engine.IndexResult(e, request.version());
|
||||
}
|
||||
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
if (update != null) {
|
||||
throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
|
||||
}
|
||||
indexShard.index(operation);
|
||||
return operation;
|
||||
return replica.index(operation);
|
||||
}
|
||||
|
||||
/** Utility method to prepare an index operation on primary shards */
|
||||
public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
|
||||
static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
|
||||
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source())
|
||||
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
|
||||
return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
|
||||
}
|
||||
|
||||
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard,
|
||||
public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
|
||||
MappingUpdatedAction mappingUpdatedAction) throws Exception {
|
||||
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
|
||||
Engine.Index operation;
|
||||
try {
|
||||
operation = prepareIndexOperationOnPrimary(request, primary);
|
||||
} catch (MapperParsingException | IllegalArgumentException e) {
|
||||
return new Engine.IndexResult(e, request.version());
|
||||
}
|
||||
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
final ShardId shardId = indexShard.shardId();
|
||||
final ShardId shardId = primary.shardId();
|
||||
if (update != null) {
|
||||
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
|
||||
operation = prepareIndexOperationOnPrimary(request, indexShard);
|
||||
// can throw timeout exception when updating mappings or ISE for attempting to update default mappings
|
||||
// which are bubbled up
|
||||
try {
|
||||
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
|
||||
} catch (IllegalArgumentException e) {
|
||||
// throws IAE on conflicts merging dynamic mappings
|
||||
return new Engine.IndexResult(e, request.version());
|
||||
}
|
||||
try {
|
||||
operation = prepareIndexOperationOnPrimary(request, primary);
|
||||
} catch (MapperParsingException | IllegalArgumentException e) {
|
||||
return new Engine.IndexResult(e, request.version());
|
||||
}
|
||||
update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
if (update != null) {
|
||||
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
|
||||
"Dynamic mappings are not available on the node that holds the primary yet");
|
||||
}
|
||||
}
|
||||
indexShard.index(operation);
|
||||
|
||||
// update the version on request so it will happen on the replicas
|
||||
final long version = operation.version();
|
||||
request.version(version);
|
||||
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
|
||||
|
||||
assert request.versionType().validateVersionForWrites(request.version());
|
||||
|
||||
IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), operation.isCreated());
|
||||
return new WriteResult<>(response, operation.getTranslogLocation());
|
||||
Engine.IndexResult result = primary.index(operation);
|
||||
if (result.hasFailure() == false) {
|
||||
// update the version on request so it will happen on the replicas
|
||||
final long version = result.getVersion();
|
||||
request.version(version);
|
||||
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
|
||||
assert request.versionType().validateVersionForWrites(request.version());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -32,4 +32,12 @@ public class DeletePipelineRequestBuilder extends ActionRequestBuilder<DeletePip
super(client, action, new DeletePipelineRequest(id));
}

/**
* Sets the id of the pipeline to delete.
*/
public DeletePipelineRequestBuilder setId(String id) {
request.setId(id);
return this;
}

}
|
|
|
@@ -138,17 +138,8 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
listener = new TaskResultStoringActionListener<>(taskManager, task, listener);
}

if (filters.length == 0) {
try {
doExecute(task, request, listener);
} catch(Exception e) {
logger.trace("Error during transport action execution.", e);
listener.onFailure(e);
}
} else {
RequestFilterChain<Request, Response> requestFilterChain = new RequestFilterChain<>(this, logger);
requestFilterChain.proceed(task, actionName, request, listener);
}
RequestFilterChain<Request, Response> requestFilterChain = new RequestFilterChain<>(this, logger);
requestFilterChain.proceed(task, actionName, request, listener);
}

protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
|
|
|
@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException;

@@ -112,22 +113,24 @@ public class ReplicationOperation<
pendingActions.incrementAndGet();
primaryResult = primary.perform(request);
final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
if (logger.isTraceEnabled()) {
logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
if (replicaRequest != null) {
assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
if (logger.isTraceEnabled()) {
logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
}

// we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
// we have to make sure that every operation indexed into the primary after recovery start will also be replicated
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
ClusterState clusterState = clusterStateSupplier.get();
final List<ShardRouting> shards = getShards(primaryId, clusterState);
Set<String> inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState);

markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards);

performOnReplicas(replicaRequest, shards);
}

// we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
// we have to make sure that every operation indexed into the primary after recovery start will also be replicated
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
ClusterState clusterState = clusterStateSupplier.get();
final List<ShardRouting> shards = getShards(primaryId, clusterState);
Set<String> inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState);

markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards);

performOnReplicas(replicaRequest, shards);

successfulShards.incrementAndGet();
decPendingAndFinishIfNeeded();
}

@@ -419,7 +422,11 @@ public class ReplicationOperation<

public interface PrimaryResult<R extends ReplicationRequest<R>> {

R replicaRequest();
/**
* @return null if no operation needs to be sent to a replica
* (for example when the operation failed on the primary due to a parsing exception)
*/
@Nullable R replicaRequest();

void setShardInfo(ReplicationResponse.ShardInfo shardInfo);
}
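Editor's note: the hunk above makes replicaRequest() nullable so a failed primary operation can skip the replica phase entirely. A minimal illustrative sketch of that contract follows; it is not part of this commit, and the class name and fields are hypothetical.

    // Hypothetical example only: a primary result whose replicaRequest() returns null
    // tells ReplicationOperation to skip the replica phase.
    class SketchPrimaryResult<R extends ReplicationRequest<R>> implements ReplicationOperation.PrimaryResult<R> {
        private final R replicaRequest;             // null when the primary operation failed
        private final ReplicationResponse response; // carries shard info on success

        SketchPrimaryResult(R replicaRequest, ReplicationResponse response) {
            this.replicaRequest = replicaRequest;
            this.response = response;
        }

        @Override
        public R replicaRequest() {
            return replicaRequest; // null => no replica phase
        }

        @Override
        public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
            if (response != null) {
                response.setShardInfo(shardInfo);
            }
        }
    }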
|
|
|
@@ -24,7 +24,6 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -34,6 +33,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Arrays;

/**
* Base class for write action responses.

@@ -162,7 +162,11 @@ public class ReplicationResponse extends ActionResponse {

@Override
public String toString() {
return Strings.toString(this);
return "ShardInfo{" +
"total=" + total +
", successful=" + successful +
", failures=" + Arrays.toString(failures) +
'}';
}

public static ShardInfo readShardInfo(StreamInput in) throws IOException {
|
|
|
@@ -24,6 +24,7 @@ import org.elasticsearch.ElasticsearchException;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionListenerResponseHandler;
|
||||
import org.elasticsearch.action.UnavailableShardsException;
|
||||
import org.elasticsearch.action.admin.indices.flush.ShardFlushRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.TransportAction;
|
||||
|
@@ -178,7 +179,7 @@ public abstract class TransportReplicationAction<
|
|||
* @param shardRequest the request to the replica shard
|
||||
* @param replica the replica shard to perform the operation on
|
||||
*/
|
||||
protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica);
|
||||
protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica) throws Exception;
|
||||
|
||||
/**
|
||||
* Cluster level block to check before request execution
|
||||
|
@@ -207,7 +208,7 @@ public abstract class TransportReplicationAction<
|
|||
|
||||
protected boolean retryPrimaryException(final Throwable e) {
|
||||
return e.getClass() == ReplicationOperation.RetryOnPrimaryException.class
|
||||
|| TransportActions.isShardNotAvailableException(e);
|
||||
|| TransportActions.isShardNotAvailableException(e);
|
||||
}
|
||||
|
||||
class OperationTransportHandler implements TransportRequestHandler<Request> {
|
||||
|
@@ -310,17 +311,10 @@ public abstract class TransportReplicationAction<
|
|||
final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex());
|
||||
final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
|
||||
final ActionListener<Response> listener = createResponseListener(primaryShardReference);
|
||||
createReplicatedOperation(request, new ActionListener<PrimaryResult>() {
|
||||
@Override
|
||||
public void onResponse(PrimaryResult result) {
|
||||
result.respond(listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
}, primaryShardReference, executeOnReplicas).execute();
|
||||
createReplicatedOperation(request,
|
||||
ActionListener.wrap(result -> result.respond(listener), listener::onFailure),
|
||||
primaryShardReference, executeOnReplicas)
|
||||
.execute();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller
|
||||
|
@@ -376,11 +370,24 @@ public abstract class TransportReplicationAction<
|
|||
|
||||
protected class PrimaryResult implements ReplicationOperation.PrimaryResult<ReplicaRequest> {
|
||||
final ReplicaRequest replicaRequest;
|
||||
final Response finalResponse;
|
||||
final Response finalResponseIfSuccessful;
|
||||
final Exception finalFailure;
|
||||
|
||||
public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) {
|
||||
/**
|
||||
* Result of executing a primary operation
|
||||
* expects <code>finalResponseIfSuccessful</code> or <code>finalFailure</code> to be not-null
|
||||
*/
|
||||
public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponseIfSuccessful, Exception finalFailure) {
|
||||
assert finalFailure != null ^ finalResponseIfSuccessful != null
|
||||
: "either a response or a failure has to be not null, " +
|
||||
"found [" + finalFailure + "] failure and ["+ finalResponseIfSuccessful + "] response";
|
||||
this.replicaRequest = replicaRequest;
|
||||
this.finalResponse = finalResponse;
|
||||
this.finalResponseIfSuccessful = finalResponseIfSuccessful;
|
||||
this.finalFailure = finalFailure;
|
||||
}
|
||||
|
||||
public PrimaryResult(ReplicaRequest replicaRequest, Response replicationResponse) {
|
||||
this(replicaRequest, replicationResponse, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -390,22 +397,37 @@ public abstract class TransportReplicationAction<
|
|||
|
||||
@Override
|
||||
public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
|
||||
finalResponse.setShardInfo(shardInfo);
|
||||
if (finalResponseIfSuccessful != null) {
|
||||
finalResponseIfSuccessful.setShardInfo(shardInfo);
|
||||
}
|
||||
}
|
||||
|
||||
public void respond(ActionListener<Response> listener) {
|
||||
listener.onResponse(finalResponse);
|
||||
if (finalResponseIfSuccessful != null) {
|
||||
listener.onResponse(finalResponseIfSuccessful);
|
||||
} else {
|
||||
listener.onFailure(finalFailure);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected class ReplicaResult {
|
||||
/**
|
||||
* Public constructor so subclasses can call it.
|
||||
*/
|
||||
public ReplicaResult() {}
|
||||
final Exception finalFailure;
|
||||
|
||||
public ReplicaResult(Exception finalFailure) {
|
||||
this.finalFailure = finalFailure;
|
||||
}
|
||||
|
||||
public ReplicaResult() {
|
||||
this(null);
|
||||
}
|
||||
|
||||
public void respond(ActionListener<TransportResponse.Empty> listener) {
|
||||
listener.onResponse(TransportResponse.Empty.INSTANCE);
|
||||
if (finalFailure == null) {
|
||||
listener.onResponse(TransportResponse.Empty.INSTANCE);
|
||||
} else {
|
||||
listener.onFailure(finalFailure);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -481,6 +503,7 @@ public abstract class TransportReplicationAction<
|
|||
transportReplicaAction,
|
||||
request),
|
||||
e);
|
||||
request.onRetry();
|
||||
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
|
||||
observer.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
@Override
|
||||
|
@@ -902,7 +925,9 @@ public abstract class TransportReplicationAction<
|
|||
@Override
|
||||
public PrimaryResult perform(Request request) throws Exception {
|
||||
PrimaryResult result = shardOperationOnPrimary(request, indexShard);
|
||||
result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm());
|
||||
if (result.replicaRequest() != null) {
|
||||
result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
@@ -44,81 +44,63 @@ import java.util.function.Supplier;
|
|||
|
||||
/**
|
||||
* Base class for transport actions that modify data in some shard like index, delete, and shardBulk.
|
||||
* Allows performing async actions (e.g. refresh) after performing write operations on primary and replica shards
|
||||
*/
|
||||
public abstract class TransportWriteAction<
|
||||
Request extends ReplicatedWriteRequest<Request>,
|
||||
ReplicaRequest extends ReplicatedWriteRequest<ReplicaRequest>,
|
||||
Response extends ReplicationResponse & WriteResponse
|
||||
> extends TransportReplicationAction<Request, Request, Response> {
|
||||
> extends TransportReplicationAction<Request, ReplicaRequest, Response> {
|
||||
|
||||
protected TransportWriteAction(Settings settings, String actionName, TransportService transportService,
|
||||
ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
|
||||
String executor) {
|
||||
Supplier<ReplicaRequest> replicaRequest, String executor) {
|
||||
super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
|
||||
indexNameExpressionResolver, request, request, executor);
|
||||
indexNameExpressionResolver, request, replicaRequest, executor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called on the primary with a reference to the {@linkplain IndexShard} to modify.
|
||||
*/
|
||||
protected abstract WriteResult<Response> onPrimaryShard(Request request, IndexShard indexShard) throws Exception;
|
||||
|
||||
/**
|
||||
* Called once per replica with a reference to the {@linkplain IndexShard} to modify.
|
||||
* Called on the primary with a reference to the primary {@linkplain IndexShard} to modify.
|
||||
*
|
||||
* @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred
|
||||
* @return the result of the operation on primary, including current translog location and operation response and failure
|
||||
* async refresh is performed on the <code>primary</code> shard according to the <code>Request</code> refresh policy
|
||||
*/
|
||||
protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard);
|
||||
|
||||
@Override
|
||||
protected final WritePrimaryResult shardOperationOnPrimary(Request request, IndexShard primary) throws Exception {
|
||||
WriteResult<Response> result = onPrimaryShard(request, primary);
|
||||
return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final WriteReplicaResult shardOperationOnReplica(Request request, IndexShard replica) {
|
||||
Translog.Location location = onReplicaShard(request, replica);
|
||||
return new WriteReplicaResult(replica, request, location);
|
||||
}
|
||||
protected abstract WritePrimaryResult shardOperationOnPrimary(Request request, IndexShard primary) throws Exception;
|
||||
|
||||
/**
|
||||
* Simple result from a write action. Write actions have static method to return these so they can integrate with bulk.
|
||||
* Called once per replica with a reference to the replica {@linkplain IndexShard} to modify.
|
||||
*
|
||||
* @return the result of the operation on replica, including current translog location and operation response and failure
|
||||
* async refresh is performed on the <code>replica</code> shard according to the <code>ReplicaRequest</code> refresh policy
|
||||
*/
|
||||
public static class WriteResult<Response extends ReplicationResponse> {
|
||||
private final Response response;
|
||||
private final Translog.Location location;
|
||||
|
||||
public WriteResult(Response response, @Nullable Location location) {
|
||||
this.response = response;
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
public Response getResponse() {
|
||||
return response;
|
||||
}
|
||||
|
||||
public Translog.Location getLocation() {
|
||||
return location;
|
||||
}
|
||||
}
|
||||
@Override
|
||||
protected abstract WriteReplicaResult shardOperationOnReplica(ReplicaRequest request, IndexShard replica) throws Exception;
|
||||
|
||||
/**
|
||||
* Result of taking the action on the primary.
|
||||
*/
|
||||
class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult {
|
||||
protected class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult {
|
||||
boolean finishedAsyncActions;
|
||||
ActionListener<Response> listener = null;
|
||||
|
||||
public WritePrimaryResult(Request request, Response finalResponse,
|
||||
@Nullable Translog.Location location,
|
||||
IndexShard indexShard) {
|
||||
super(request, finalResponse);
|
||||
/*
|
||||
* We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the
|
||||
* refresh in parallel on the primary and on the replica.
|
||||
*/
|
||||
new AsyncAfterWriteAction(indexShard, request, location, this, logger).run();
|
||||
public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse,
|
||||
@Nullable Location location, @Nullable Exception operationFailure,
|
||||
IndexShard primary) {
|
||||
super(request, finalResponse, operationFailure);
|
||||
assert location == null || operationFailure == null
|
||||
: "expected either failure to be null or translog location to be null, " +
|
||||
"but found: [" + location + "] translog location and [" + operationFailure + "] failure";
|
||||
if (operationFailure != null) {
|
||||
this.finishedAsyncActions = true;
|
||||
} else {
|
||||
/*
|
||||
* We call this before replication because this might wait for a refresh and that can take a while.
|
||||
* This way we wait for the refresh in parallel on the primary and on the replica.
|
||||
*/
|
||||
new AsyncAfterWriteAction(primary, request, location, this, logger).run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -147,7 +129,7 @@ public abstract class TransportWriteAction<
|
|||
|
||||
@Override
|
||||
public synchronized void onSuccess(boolean forcedRefresh) {
|
||||
finalResponse.setForcedRefresh(forcedRefresh);
|
||||
finalResponseIfSuccessful.setForcedRefresh(forcedRefresh);
|
||||
finishedAsyncActions = true;
|
||||
respondIfPossible(null);
|
||||
}
|
||||
|
@@ -156,12 +138,18 @@ public abstract class TransportWriteAction<
|
|||
/**
|
||||
* Result of taking the action on the replica.
|
||||
*/
|
||||
class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult {
|
||||
protected class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult {
|
||||
boolean finishedAsyncActions;
|
||||
private ActionListener<TransportResponse.Empty> listener;
|
||||
|
||||
public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest<?> request, Translog.Location location) {
|
||||
new AsyncAfterWriteAction(indexShard, request, location, this, logger).run();
|
||||
public WriteReplicaResult(ReplicaRequest request, @Nullable Location location,
|
||||
@Nullable Exception operationFailure, IndexShard replica) {
|
||||
super(operationFailure);
|
||||
if (operationFailure != null) {
|
||||
this.finishedAsyncActions = true;
|
||||
} else {
|
||||
new AsyncAfterWriteAction(replica, request, location, this, logger).run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -563,6 +563,11 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*/
DeletePipelineRequestBuilder prepareDeletePipeline();

/**
* Deletes a stored ingest pipeline
*/
DeletePipelineRequestBuilder prepareDeletePipeline(String id);

/**
* Returns a stored ingest pipeline
|
|
|
@@ -1096,6 +1096,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE);
}

@Override
public DeletePipelineRequestBuilder prepareDeletePipeline(String id) {
return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE, id);
}

@Override
public void getPipeline(GetPipelineRequest request, ActionListener<GetPipelineResponse> listener) {
execute(GetPipelineAction.INSTANCE, request, listener);
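Editor's note: a minimal usage sketch of the prepareDeletePipeline(String id) overload added above (not part of this commit; "my-pipeline" is a placeholder id and client is assumed to be an existing Client instance).

    // Deletes the ingest pipeline with the given id using the new builder overload.
    client.admin().cluster().prepareDeletePipeline("my-pipeline").get();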
|
|
|
@@ -274,15 +274,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
return routingNodes;
}

public String prettyPrint() {
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n");
sb.append("version: ").append(version).append("\n");
sb.append("state uuid: ").append(stateUUID).append("\n");
sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
sb.append("meta data version: ").append(metaData.version()).append("\n");
final String TAB = " ";
for (IndexMetaData indexMetaData : metaData) {
final String TAB = " ";
sb.append(TAB).append(indexMetaData.getIndex());
sb.append(": v[").append(indexMetaData.getVersion()).append("]\n");
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {

@@ -291,24 +292,19 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
sb.append("isa_ids ").append(indexMetaData.inSyncAllocationIds(shard)).append("\n");
}
}
sb.append(blocks().prettyPrint());
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());
sb.append(getRoutingNodes().prettyPrint());
return sb.toString();
}

@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
sb.append(blocks());
sb.append(nodes());
sb.append(routingTable());
sb.append(getRoutingNodes());
if (customs.isEmpty() == false) {
sb.append("customs:\n");
for (ObjectObjectCursor<String, Custom> cursor : customs) {
final String type = cursor.key;
final Custom custom = cursor.value;
sb.append(TAB).append(type).append(": ").append(custom);
}
}
return sb.toString();
}

/**
|
|
|
@@ -36,6 +36,7 @@ import org.elasticsearch.discovery.zen.NodesFaultDetection;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledFuture;

@@ -75,10 +76,10 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings);
}

public void connectToAddedNodes(ClusterChangedEvent event) {
public void connectToNodes(List<DiscoveryNode> addedNodes) {

// TODO: do this in parallel (and wait)
for (final DiscoveryNode node : event.nodesDelta().addedNodes()) {
for (final DiscoveryNode node : addedNodes) {
try (Releasable ignored = nodeLocks.acquire(node)) {
Integer current = nodes.put(node, 0);
assert current == null : "node " + node + " was added in event but already in internal nodes";

@@ -87,8 +88,8 @@ public class NodeConnectionsService extends AbstractLifecycleComponent {
}
}

public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
for (final DiscoveryNode node : event.nodesDelta().removedNodes()) {
public void disconnectFromNodes(List<DiscoveryNode> removedNodes) {
for (final DiscoveryNode node : removedNodes) {
try (Releasable ignored = nodeLocks.acquire(node)) {
Integer current = nodes.remove(node);
assert current != null : "node " + node + " was removed in event but not in internal nodes";
|
|
|
@@ -164,7 +164,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onNewClusterState(ClusterState state) {
if (logger.isTraceEnabled()) {
logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state.prettyPrint(), shardEntry);
logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state, shardEntry);
}
sendShardAction(actionName, observer, shardEntry, listener);
}
|
|
|
@@ -199,7 +199,8 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet())));
}

public String prettyPrint() {
@Override
public String toString() {
if (global.isEmpty() && indices().isEmpty()) {
return "";
}
|
|
|
@@ -97,10 +97,29 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
SNAPSHOT
}

/**
* Indicates that this custom metadata will be returned as part of an API call but will not be persisted
*/
public static EnumSet<XContentContext> API_ONLY = EnumSet.of(XContentContext.API);

/**
* Indicates that this custom metadata will be returned as part of an API call and will be persisted between
* node restarts, but will not be a part of a snapshot global state
*/
public static EnumSet<XContentContext> API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY);

/**
* Indicates that this custom metadata will be returned as part of an API call and stored as a part of
* a snapshot global state, but will not be persisted between node restarts
*/
public static EnumSet<XContentContext> API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT);

/**
* Indicates that this custom metadata will be returned as part of an API call, stored as a part of
* a snapshot global state, and will be persisted between node restarts
*/
public static EnumSet<XContentContext> ALL_CONTEXTS = EnumSet.allOf(XContentContext.class);

public interface Custom extends Diffable<Custom>, ToXContent {

String type();
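Editor's note: a small illustrative snippet (not part of this commit) spelling out what the new context constants express; the assertions simply enumerate which contexts each set contains.

    // Illustrative only: API_AND_GATEWAY customs are returned by APIs and persisted across
    // node restarts, but are excluded from the snapshot global state.
    EnumSet<MetaData.XContentContext> ctx = MetaData.API_AND_GATEWAY;
    assert ctx.contains(MetaData.XContentContext.API);
    assert ctx.contains(MetaData.XContentContext.GATEWAY);
    assert ctx.contains(MetaData.XContentContext.SNAPSHOT) == false;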
|
|
|
@@ -352,7 +352,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
throw mpe;
}

final QueryShardContext queryShardContext = indexService.newQueryShardContext();
// the context is only used for validation so it's fine to pass fake values for the shard id and the current
// timestamp
final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L);
for (Alias alias : request.aliases()) {
if (Strings.hasLength(alias.filter())) {
aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext);
|
|
|
@@ -149,7 +149,9 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
}
indices.put(action.getIndex(), indexService);
}
aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext());
// the context is only used for validation so it's fine to pass fake values for the shard id and the current
// timestamp
aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext(0, null, () -> 0L));
}
};
changed |= action.apply(newAliasValidator, metadata, index);
|
|
|
@ -26,7 +26,6 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.cluster.AbstractDiffable;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -398,14 +397,6 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("{");
|
||||
sb.append(Strings.collectionToDelimitedString(this, ","));
|
||||
sb.append("}");
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
public String prettyPrint() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("nodes: \n");
|
||||
for (DiscoveryNode node : this) {
|
||||
|
|
|
@ -391,7 +391,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
return shards;
|
||||
}
|
||||
|
||||
public String prettyPrint() {
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder("routing_nodes:\n");
|
||||
for (RoutingNode routingNode : this) {
|
||||
sb.append(routingNode.prettyPrint());
|
||||
|
|
|
@ -109,7 +109,7 @@ public class RoutingService extends AbstractLifecycleComponent {
|
|||
rerouting.set(false);
|
||||
ClusterState state = clusterService.state();
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e);
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
|
||||
} else {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
|
||||
}
|
||||
|
@ -118,7 +118,7 @@ public class RoutingService extends AbstractLifecycleComponent {
|
|||
} catch (Exception e) {
|
||||
rerouting.set(false);
|
||||
ClusterState state = clusterService.state();
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e);
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -613,7 +613,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
|||
}
|
||||
}
|
||||
|
||||
public String prettyPrint() {
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder("routing_table (version ").append(version).append("):\n");
|
||||
for (ObjectObjectCursor<String, IndexRoutingTable> entry : indicesRouting) {
|
||||
sb.append(entry.value.prettyPrint()).append('\n');
|
||||
|
|
|
@ -233,7 +233,7 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
|
|||
Set<String> oldInSyncAllocations = oldIndexMetaData.inSyncAllocationIds(shardNumber);
|
||||
Set<String> idsToRemove = shardEntry.getValue().stream().map(e -> e.getAllocationId()).collect(Collectors.toSet());
|
||||
assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) :
|
||||
"removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable.prettyPrint();
|
||||
"removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable;
|
||||
Set<String> remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove);
|
||||
assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard " +
|
||||
shardEntry.getKey() + " (before: " + oldInSyncAllocations + ", ids to remove: " + idsToRemove + ")";
|
||||
|
|
|
@ -126,6 +126,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
|||
balancer.balance();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a decision on rebalancing a single shard to form a more optimal cluster balance. This
|
||||
* method is not used in itself for cluster rebalancing because all shards from all indices are
|
||||
* taken into account when making rebalancing decisions. This method is only intended to be used
|
||||
* from the cluster allocation explain API to explain possible rebalancing decisions for a single
|
||||
* shard.
|
||||
*/
|
||||
public RebalanceDecision decideRebalance(final ShardRouting shard, final RoutingAllocation allocation) {
|
||||
assert allocation.debugDecision() : "debugDecision should be set in explain mode";
|
||||
return new Balancer(logger, allocation, weightFunction, threshold).decideRebalance(shard);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the currently configured delta threshold
|
||||
*/
|
||||
|
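As the Javadoc above states, decideRebalance(shard, allocation) exists solely for the cluster allocation explain API. A hedged sketch of how that code path might consume it; allocator, shardRouting and allocation are assumed variables, and the accessor names are taken from the RelocationDecision/RebalanceDecision classes later in this commit:

// assumes allocation.debugDecision() is already true on the explain path
RebalanceDecision decision = allocator.decideRebalance(shardRouting, allocation);
if (decision.getFinalDecisionType() == Type.YES) {
    String targetNodeId = decision.getAssignedNodeId();
    // surface targetNodeId plus decision.getNodeDecisions() in the explain response
}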
@ -267,11 +279,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return new NodeSorter(nodesArray(), weight, this);
}

/**
* The absolute value difference between two weights.
*/
private static float absDelta(float lower, float higher) {
assert higher >= lower : higher + " lt " + lower +" but was expected to be gte";
return Math.abs(higher - lower);
}

/**
* Returns {@code true} iff the weight delta between two nodes is under a defined threshold.
* See {@link #THRESHOLD_SETTING} for defining the threshold.
*/
private static boolean lessThan(float delta, float threshold) {
/* deltas close to the threshold are "rounded" to the threshold manually
to prevent floating point problems if the delta is very close to the

@ -309,6 +328,110 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
balanceByWeights();
}

/**
* Makes a decision about moving a single shard to a different node to form a more
* optimally balanced cluster. This method is invoked from the cluster allocation
* explain API only.
*/
private RebalanceDecision decideRebalance(final ShardRouting shard) {
if (shard.started() == false) {
// cannot rebalance a shard that isn't started
return RebalanceDecision.NOT_TAKEN;
}

Decision canRebalance = allocation.deciders().canRebalance(shard, allocation);

if (allocation.hasPendingAsyncFetch()) {
return new RebalanceDecision(
canRebalance,
Type.NO,
"cannot rebalance due to in-flight shard store fetches, otherwise allocation may prematurely rebalance a shard to " +
"a node that is soon to receive another shard assignment upon completion of the shard store fetch, " +
"rendering the cluster imbalanced again"
);
}

sorter.reset(shard.getIndexName());
ModelNode[] modelNodes = sorter.modelNodes;
final String currentNodeId = shard.currentNodeId();
// find currently assigned node
ModelNode currentNode = null;
for (ModelNode node : modelNodes) {
if (node.getNodeId().equals(currentNodeId)) {
currentNode = node;
break;
}
}
assert currentNode != null : "currently assigned node could not be found";

// balance the shard, if a better node can be found
final float currentWeight = sorter.weight(currentNode);
final AllocationDeciders deciders = allocation.deciders();
final String idxName = shard.getIndexName();
Map<String, NodeRebalanceDecision> nodeDecisions = new HashMap<>(modelNodes.length - 1);
Type rebalanceDecisionType = Type.NO;
String assignedNodeId = null;
for (ModelNode node : modelNodes) {
if (node == currentNode) {
continue; // skip over node we're currently allocated to it
}
final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation);
// the current weight of the node in the cluster, as computed by the weight function;
// this is a comparison of the number of shards on this node to the number of shards
// that should be on each node on average (both taking the cluster as a whole into account
// as well as shards per index)
final float nodeWeight = sorter.weight(node);
// if the node we are examining has a worse (higher) weight than the node the shard is
// assigned to, then there is no way moving the shard to the node with the worse weight
// can make the balance of the cluster better, so we check for that here
final boolean betterWeightThanCurrent = nodeWeight <= currentWeight;
boolean rebalanceConditionsMet = false;
boolean deltaAboveThreshold = false;
float weightWithShardAdded = Float.POSITIVE_INFINITY;
if (betterWeightThanCurrent) {
// get the delta between the weights of the node we are checking and the node that holds the shard
final float currentDelta = absDelta(nodeWeight, currentWeight);
// checks if the weight delta is above a certain threshold; if it is not above a certain threshold,
// then even though the node we are examining has a better weight and may make the cluster balance
// more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless
// the gains make it worth it, as defined by the threshold
deltaAboveThreshold = lessThan(currentDelta, threshold) == false;
// simulate the weight of the node if we were to relocate the shard to it
weightWithShardAdded = weight.weightShardAdded(this, node, idxName);
// calculate the delta of the weights of the two nodes if we were to add the shard to the
// node in question and move it away from the node that currently holds it.
final float proposedDelta = weightWithShardAdded - weight.weightShardRemoved(this, currentNode, idxName);
rebalanceConditionsMet = deltaAboveThreshold && proposedDelta < currentDelta;
// if the simulated weight delta with the shard moved away is better than the weight delta
// with the shard remaining on the current node, and we are allowed to allocate to the
// node in question, then allow the rebalance
if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) {
// rebalance to the node, only will get overwritten if the decision here is to
// THROTTLE and we get a decision with YES on another node
rebalanceDecisionType = canAllocate.type();
assignedNodeId = node.getNodeId();
}
}
nodeDecisions.put(node.getNodeId(), new NodeRebalanceDecision(
rebalanceConditionsMet ? canAllocate.type() : Type.NO,
canAllocate,
betterWeightThanCurrent,
deltaAboveThreshold,
nodeWeight,
weightWithShardAdded)
);
}

if (canRebalance.type() != Type.YES) {
return new RebalanceDecision(canRebalance, canRebalance.type(), "rebalancing is not allowed", null,
nodeDecisions, currentWeight);
} else {
return RebalanceDecision.decision(canRebalance, rebalanceDecisionType, assignedNodeId,
nodeDecisions, currentWeight, threshold);
}
}

public Map<DiscoveryNode, Float> weighShard(ShardRouting shard) {
final ModelNode[] modelNodes = sorter.modelNodes;
final float[] weights = sorter.weights;
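To make the weight arithmetic in decideRebalance concrete, here is a worked example with invented numbers; it assumes the default threshold of 1.0f and ignores the float rounding that lessThan applies near the threshold:

float threshold = 1.0f;
float currentWeight = 3.0f;   // node currently holding the shard
float nodeWeight = 0.5f;      // candidate node
float currentDelta = Math.abs(currentWeight - nodeWeight);            // 2.5, above the threshold
float weightWithShardAdded = 1.5f;    // simulated candidate weight after the move
float weightWithShardRemoved = 2.0f;  // simulated current-node weight after the move
float proposedDelta = weightWithShardAdded - weightWithShardRemoved;  // -0.5, smaller than currentDelta
boolean rebalanceConditionsMet = currentDelta >= threshold && proposedDelta < currentDelta; // true, so the move helps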
@ -539,7 +662,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
public MoveDecision makeMoveDecision(final ShardRouting shardRouting) {
if (shardRouting.started() == false) {
// we can only move started shards
return MoveDecision.DECISION_NOT_TAKEN;
return MoveDecision.NOT_TAKEN;
}

final boolean explain = allocation.debugDecision();

@ -1110,15 +1233,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final String finalExplanation;
@Nullable
private final String assignedNodeId;
@Nullable
private final Map<String, WeightedDecision> nodeDecisions;

protected RelocationDecision(Type finalDecision, String finalExplanation, String assignedNodeId,
Map<String, WeightedDecision> nodeDecisions) {
protected RelocationDecision(Type finalDecision, String finalExplanation, String assignedNodeId) {
this.finalDecision = finalDecision;
this.finalExplanation = finalExplanation;
this.assignedNodeId = assignedNodeId;
this.nodeDecisions = nodeDecisions;
}

/**

@ -1153,15 +1272,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
public String getAssignedNodeId() {
return assignedNodeId;
}

/**
* Gets the individual node-level decisions that went into making the final decision as represented by
* {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link WeightedDecision}.
*/
@Nullable
public Map<String, WeightedDecision> getNodeDecisions() {
return nodeDecisions;
}
}

/**

@ -1169,18 +1279,21 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
*/
public static final class MoveDecision extends RelocationDecision {
/** a constant representing no decision taken */
public static final MoveDecision DECISION_NOT_TAKEN = new MoveDecision(null, null, null, null, null);
public static final MoveDecision NOT_TAKEN = new MoveDecision(null, null, null, null, null);
/** cached decisions so we don't have to recreate objects for common decisions when not in explain mode. */
private static final MoveDecision CACHED_STAY_DECISION = new MoveDecision(Decision.YES, Type.NO, null, null, null);
private static final MoveDecision CACHED_CANNOT_MOVE_DECISION = new MoveDecision(Decision.NO, Type.NO, null, null, null);

@Nullable
private final Decision canRemainDecision;
@Nullable
private final Map<String, WeightedDecision> nodeDecisions;

private MoveDecision(Decision canRemainDecision, Type finalDecision, String finalExplanation,
String assignedNodeId, Map<String, WeightedDecision> nodeDecisions) {
super(finalDecision, finalExplanation, assignedNodeId, nodeDecisions);
super(finalDecision, finalExplanation, assignedNodeId);
this.canRemainDecision = canRemainDecision;
this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
}

/**
@ -1250,6 +1363,147 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
public boolean cannotRemain() {
return isDecisionTaken() && canRemainDecision.type() == Type.NO;
}

/**
* Gets the individual node-level decisions that went into making the final decision as represented by
* {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link WeightedDecision}.
*/
@Nullable
public Map<String, WeightedDecision> getNodeDecisions() {
return nodeDecisions;
}
}

/**
* Represents a decision to move a started shard to form a more optimally balanced cluster.
*/
public static final class RebalanceDecision extends RelocationDecision {
/** a constant representing no decision taken */
public static final RebalanceDecision NOT_TAKEN = new RebalanceDecision(null, null, null, null, null, Float.POSITIVE_INFINITY);

@Nullable
private final Decision canRebalanceDecision;
@Nullable
private final Map<String, NodeRebalanceDecision> nodeDecisions;
private float currentWeight;

protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation) {
this(canRebalanceDecision, finalDecision, finalExplanation, null, null, Float.POSITIVE_INFINITY);
}

protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation,
String assignedNodeId, Map<String, NodeRebalanceDecision> nodeDecisions, float currentWeight) {
super(finalDecision, finalExplanation, assignedNodeId);
this.canRebalanceDecision = canRebalanceDecision;
this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
this.currentWeight = currentWeight;
}

/**
* Creates a new {@link RebalanceDecision}, computing the explanation based on the decision parameters.
*/
public static RebalanceDecision decision(Decision canRebalanceDecision, Type finalDecision, String assignedNodeId,
Map<String, NodeRebalanceDecision> nodeDecisions, float currentWeight, float threshold) {
final String explanation = produceFinalExplanation(finalDecision, assignedNodeId, threshold);
return new RebalanceDecision(canRebalanceDecision, finalDecision, explanation, assignedNodeId, nodeDecisions, currentWeight);
}

/**
* Returns the decision for being allowed to rebalance the shard.
*/
@Nullable
public Decision getCanRebalanceDecision() {
return canRebalanceDecision;
}

/**
* Gets the individual node-level decisions that went into making the final decision as represented by
* {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link NodeRebalanceDecision}.
*/
@Nullable
public Map<String, NodeRebalanceDecision> getNodeDecisions() {
return nodeDecisions;
}

private static String produceFinalExplanation(final Type finalDecisionType, final String assignedNodeId, final float threshold) {
final String finalExplanation;
if (assignedNodeId != null) {
if (finalDecisionType == Type.THROTTLE) {
finalExplanation = "throttle moving shard to node [" + assignedNodeId + "], as it is " +
"currently busy with other shard relocations";
} else {
finalExplanation = "moving shard to node [" + assignedNodeId + "] to form a more balanced cluster";
}
} else {
finalExplanation = "cannot rebalance shard, no other node exists that would form a more balanced " +
"cluster within the defined threshold [" + threshold + "]";
}
return finalExplanation;
}
}

/**
* A node-level explanation for the decision to rebalance a shard.
*/
public static final class NodeRebalanceDecision {
private final Type nodeDecisionType;
private final Decision canAllocate;
private final boolean betterWeightThanCurrent;
private final boolean deltaAboveThreshold;
private final float currentWeight;
private final float weightWithShardAdded;

NodeRebalanceDecision(Type nodeDecisionType, Decision canAllocate, boolean betterWeightThanCurrent,
boolean deltaAboveThreshold, float currentWeight, float weightWithShardAdded) {
this.nodeDecisionType = Objects.requireNonNull(nodeDecisionType);
this.canAllocate = Objects.requireNonNull(canAllocate);
this.betterWeightThanCurrent = betterWeightThanCurrent;
this.deltaAboveThreshold = deltaAboveThreshold;
this.currentWeight = currentWeight;
this.weightWithShardAdded = weightWithShardAdded;
}

/**
* Returns the decision to rebalance to the node.
*/
public Type getNodeDecisionType() {
return nodeDecisionType;
}

/**
* Returns whether the shard is allowed to be allocated to the node.
*/
public Decision getCanAllocateDecision() {
return canAllocate;
}

/**
* Returns whether the weight of the node is better than the weight of the node where the shard currently resides.
*/
public boolean isBetterWeightThanCurrent() {
return betterWeightThanCurrent;
}

/**
* Returns if the weight delta by assigning to this node was above the threshold to warrant a rebalance.
*/
public boolean isDeltaAboveThreshold() {
return deltaAboveThreshold;
}

/**
* Returns the current weight of the node if the shard is not added to the node.
*/
public float getCurrentWeight() {
return currentWeight;
}

/**
* Returns the weight of the node if the shard is added to the node.
*/
public float getWeightWithShardAdded() {
return weightWithShardAdded;
}
}

}
@ -222,7 +222,7 @@ public abstract class Decision implements ToXContent {
}

/**
* Returns the explanation string, fully formatted. Only formats the string once
* Returns the explanation string, fully formatted. Only formats the string once.
*/
@Nullable
public String getExplanation() {

@ -103,28 +103,33 @@ public class EnableAllocationDecider extends AllocationDecider {

final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
final Allocation enable;
final boolean usedIndexSetting;
if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) {
enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings());
usedIndexSetting = true;
} else {
enable = this.enableAllocation;
usedIndexSetting = false;
}
switch (enable) {
case ALL:
return allocation.decision(Decision.YES, NAME, "all allocations are allowed");
case NONE:
return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
return allocation.decision(Decision.NO, NAME, "no allocations are allowed due to {}", setting(enable, usedIndexSetting));
case NEW_PRIMARIES:
if (shardRouting.primary() && shardRouting.active() == false &&
shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden due to {}",
setting(enable, usedIndexSetting));
}
case PRIMARIES:
if (shardRouting.primary()) {
return allocation.decision(Decision.YES, NAME, "primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden");
return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden due to {}",
setting(enable, usedIndexSetting));
}
default:
throw new IllegalStateException("Unknown allocation option");

@ -139,33 +144,60 @@ public class EnableAllocationDecider extends AllocationDecider {

Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings();
final Rebalance enable;
final boolean usedIndexSetting;
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
usedIndexSetting = true;
} else {
enable = this.enableRebalance;
usedIndexSetting = false;
}
switch (enable) {
case ALL:
return allocation.decision(Decision.YES, NAME, "all rebalancing is allowed");
case NONE:
return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed");
return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to {}", setting(enable, usedIndexSetting));
case PRIMARIES:
if (shardRouting.primary()) {
return allocation.decision(Decision.YES, NAME, "primary rebalancing is allowed");
} else {
return allocation.decision(Decision.NO, NAME, "replica rebalancing is forbidden");
return allocation.decision(Decision.NO, NAME, "replica rebalancing is forbidden due to {}",
setting(enable, usedIndexSetting));
}
case REPLICAS:
if (shardRouting.primary() == false) {
return allocation.decision(Decision.YES, NAME, "replica rebalancing is allowed");
} else {
return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden");
return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden due to {}",
setting(enable, usedIndexSetting));
}
default:
throw new IllegalStateException("Unknown rebalance option");
}
}

private static String setting(Allocation allocation, boolean usedIndexSetting) {
StringBuilder buf = new StringBuilder("[");
if (usedIndexSetting) {
buf.append(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey());
} else {
buf.append(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey());
}
buf.append("=").append(allocation.toString().toLowerCase(Locale.ROOT)).append("]");
return buf.toString();
}

private static String setting(Rebalance rebalance, boolean usedIndexSetting) {
StringBuilder buf = new StringBuilder("[");
if (usedIndexSetting) {
buf.append(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey());
} else {
buf.append(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey());
}
buf.append("=").append(rebalance.toString().toLowerCase(Locale.ROOT)).append("]");
return buf.toString();
}

/**
* Allocation values or rather their string representation to be used used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
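The new setting(...) helpers only change the NO explanations so that they name the setting responsible for the rejection. A hypothetical illustration (the settings value and the quoted message are assumed, not captured from this commit); when the index-level key index.routing.allocation.enable is set, it takes precedence for that index:

Settings clusterSettings = Settings.builder()
    .put("cluster.routing.allocation.enable", "none")  // key of CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING
    .build();
// a shard allocation would then be rejected with an explanation along the lines of
// "no allocations are allowed due to [cluster.routing.allocation.enable=none]"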
@ -562,9 +562,9 @@ public class ClusterService extends AbstractLifecycleComponent {
executionTime,
previousClusterState.version(),
tasksSummary,
previousClusterState.nodes().prettyPrint(),
previousClusterState.routingTable().prettyPrint(),
previousClusterState.getRoutingNodes().prettyPrint()),
previousClusterState.nodes(),
previousClusterState.routingTable(),
previousClusterState.getRoutingNodes()),
e);
}
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);

@ -656,7 +656,7 @@ public class ClusterService extends AbstractLifecycleComponent {
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);

if (logger.isTraceEnabled()) {
logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState.prettyPrint());
logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState);
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), tasksSummary);
}

@ -671,7 +671,7 @@ public class ClusterService extends AbstractLifecycleComponent {
}
}

nodeConnectionsService.connectToAddedNodes(clusterChangedEvent);
nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes());

// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails

@ -686,6 +686,8 @@ public class ClusterService extends AbstractLifecycleComponent {
(Supplier<?>) () -> new ParameterizedMessage(
"failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version),
t);
// ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes());
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
return;
}

@ -711,7 +713,7 @@ public class ClusterService extends AbstractLifecycleComponent {
}
}

nodeConnectionsService.disconnectFromRemovedNodes(clusterChangedEvent);
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes());

newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);

@ -757,7 +759,7 @@ public class ClusterService extends AbstractLifecycleComponent {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
final long version = newClusterState.version();
final String stateUUID = newClusterState.stateUUID();
final String prettyPrint = newClusterState.prettyPrint();
final String fullState = newClusterState.toString();
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",

@ -765,7 +767,7 @@ public class ClusterService extends AbstractLifecycleComponent {
version,
stateUUID,
tasksSummary,
prettyPrint),
fullState),
e);
// TODO: do we want to call updateTask.onFailure here?
}

@ -824,9 +826,7 @@ public class ClusterService extends AbstractLifecycleComponent {
(Supplier<?>) () -> new ParameterizedMessage(
"exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
"{}\nnew cluster state:\n{}",
source,
oldState.prettyPrint(),
newState.prettyPrint()),
source, oldState, newState),
e);
}
}
@ -21,6 +21,7 @@ package org.elasticsearch.common.component;

import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

@ -101,11 +102,17 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent imple
listener.beforeClose();
}
lifecycle.moveToClosed();
doClose();
try {
doClose();
} catch (IOException e) {
// TODO: we need to separate out closing (ie shutting down) services, vs releasing runtime transient
// structures. Shutting down services should use IOUtils.close
logger.warn("failed to close " + getClass().getName(), e);
}
for (LifecycleListener listener : listeners) {
listener.afterClose();
}
}

protected abstract void doClose();
protected abstract void doClose() throws IOException;
}
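With doClose() now declared to throw IOException, a component can hand all of its closeable resources to IOUtils.close (org.apache.lucene.util.IOUtils) instead of wrapping each close individually. A hypothetical subclass, invented purely for illustration:

public class ExampleComponent extends AbstractLifecycleComponent {
    private final Closeable channel;
    private final Closeable cache;

    ExampleComponent(Settings settings, Closeable channel, Closeable cache) {
        super(settings);
        this.channel = channel;
        this.cache = cache;
    }

    @Override
    protected void doStart() {}

    @Override
    protected void doStop() {}

    @Override
    protected void doClose() throws IOException {
        // closes both resources and rethrows a single IOException if any close fails
        IOUtils.close(channel, cache);
    }
}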
@ -69,7 +69,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {

@Override
public void writeByte(byte b) throws IOException {
ensureCapacity(count+1);
ensureCapacity(count + 1L);
bytes.set(count, b);
count++;
}

@ -87,7 +87,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
}

// get enough pages for new size
ensureCapacity(count+length);
ensureCapacity(((long) count) + length);

// bulk copy
bytes.set(count, b, offset, length);

@ -113,22 +113,17 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
}

@Override
public void seek(long position) throws IOException {
if (position > Integer.MAX_VALUE) {
throw new IllegalArgumentException("position " + position + " > Integer.MAX_VALUE");
}

count = (int)position;
ensureCapacity(count);
public void seek(long position) {
ensureCapacity(position);
count = (int) position;
}

public void skip(int length) {
count += length;
ensureCapacity(count);
seek(((long) count) + length);
}

@Override
public void close() throws IOException {
public void close() {
// empty for now.
}

@ -156,7 +151,10 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
return bytes.ramBytesUsed();
}

private void ensureCapacity(int offset) {
private void ensureCapacity(long offset) {
if (offset > Integer.MAX_VALUE) {
throw new IllegalArgumentException(getClass().getSimpleName() + " cannot hold more than 2GB of data");
}
bytes = bigArrays.grow(bytes, offset);
}
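The long-valued ensureCapacity matters because the old int arithmetic could overflow before the bounds check ever ran. A small standalone illustration in plain Java:

int count = Integer.MAX_VALUE;
int overflowed = count + 1;   // wraps to -2147483648, so an int-based capacity check never triggers
long promoted = count + 1L;   // 2147483648, correctly detected as larger than Integer.MAX_VALUE
System.out.println(overflowed + " vs " + promoted);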
@ -1,250 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.util;

import org.elasticsearch.common.inject.Binder;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Settings;

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
* This class defines an official elasticsearch extension point. It registers
* all extensions by a single name and ensures that extensions are not registered
* more than once.
*/
public abstract class ExtensionPoint {
protected final String name;
protected final Class<?>[] singletons;

/**
* Creates a new extension point
*
* @param name the human readable underscore case name of the extension point. This is used in error messages etc.
* @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
*/
public ExtensionPoint(String name, Class<?>... singletons) {
this.name = name;
this.singletons = singletons;
}

/**
* Binds the extension as well as the singletons to the given guice binder.
*
* @param binder the binder to use
*/
public final void bind(Binder binder) {
for (Class<?> c : singletons) {
binder.bind(c).asEagerSingleton();
}
bindExtensions(binder);
}

/**
* Subclasses can bind their type, map or set extensions here.
*/
protected abstract void bindExtensions(Binder binder);

/**
* A map based extension point which allows to register keyed implementations ie. parsers or some kind of strategies.
*/
public static class ClassMap<T> extends ExtensionPoint {
protected final Class<T> extensionClass;
protected final Map<String, Class<? extends T>> extensions = new HashMap<>();
private final Set<String> reservedKeys;

/**
* Creates a new {@link ClassMap}
*
* @param name the human readable underscore case name of the extension point. This is used in error messages etc.
* @param extensionClass the base class that should be extended
* @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
* @param reservedKeys a set of reserved keys by internal implementations
*/
public ClassMap(String name, Class<T> extensionClass, Set<String> reservedKeys, Class<?>... singletons) {
super(name, singletons);
this.extensionClass = extensionClass;
this.reservedKeys = reservedKeys;
}

/**
* Returns the extension for the given key or <code>null</code>
*/
public Class<? extends T> getExtension(String type) {
return extensions.get(type);
}

/**
* Registers an extension class for a given key. This method will thr
*
* @param key the extensions key
* @param extension the extension
* @throws IllegalArgumentException iff the key is already registered or if the key is a reserved key for an internal implementation
*/
public final void registerExtension(String key, Class<? extends T> extension) {
if (extensions.containsKey(key) || reservedKeys.contains(key)) {
throw new IllegalArgumentException("Can't register the same [" + this.name + "] more than once for [" + key + "]");
}
extensions.put(key, extension);
}

@Override
protected final void bindExtensions(Binder binder) {
MapBinder<String, T> parserMapBinder = MapBinder.newMapBinder(binder, String.class, extensionClass);
for (Map.Entry<String, Class<? extends T>> clazz : extensions.entrySet()) {
parserMapBinder.addBinding(clazz.getKey()).to(clazz.getValue());
}
}
}

/**
* A Type extension point which basically allows to registered keyed extensions like {@link ClassMap}
* but doesn't instantiate and bind all the registered key value pairs but instead replace a singleton based on a given setting via {@link #bindType(Binder, Settings, String, String)}
* Note: {@link #bind(Binder)} is not supported by this class
*/
public static final class SelectedType<T> extends ClassMap<T> {

public SelectedType(String name, Class<T> extensionClass) {
super(name, extensionClass, Collections.emptySet());
}

/**
* Binds the extension class to the class that is registered for the give configured for the settings key in
* the settings object.
*
* @param binder the binder to use
* @param settings the settings to look up the key to find the implementation to bind
* @param settingsKey the key to use with the settings
* @param defaultValue the default value if the settings do not contain the key, or null if there is no default
* @return the actual bound type key
*/
public String bindType(Binder binder, Settings settings, String settingsKey, String defaultValue) {
final String type = settings.get(settingsKey, defaultValue);
if (type == null) {
throw new IllegalArgumentException("Missing setting [" + settingsKey + "]");
}
final Class<? extends T> instance = getExtension(type);
if (instance == null) {
throw new IllegalArgumentException("Unknown [" + this.name + "] type [" + type + "] possible values: "
+ extensions.keySet());
}
if (extensionClass == instance) {
binder.bind(extensionClass).asEagerSingleton();
} else {
binder.bind(extensionClass).to(instance).asEagerSingleton();
}
return type;
}

}

/**
* A set based extension point which allows to register extended classes that might be used to chain additional functionality etc.
*/
public static final class ClassSet<T> extends ExtensionPoint {
protected final Class<T> extensionClass;
private final Set<Class<? extends T>> extensions = new HashSet<>();

/**
* Creates a new {@link ClassSet}
*
* @param name the human readable underscore case name of the extension point. This is used in error messages etc.
* @param extensionClass the base class that should be extended
* @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
*/
public ClassSet(String name, Class<T> extensionClass, Class<?>... singletons) {
super(name, singletons);
this.extensionClass = extensionClass;
}

/**
* Registers a new extension
*
* @param extension the extension to register
* @throws IllegalArgumentException iff the class is already registered
*/
public void registerExtension(Class<? extends T> extension) {
if (extensions.contains(extension)) {
throw new IllegalArgumentException("Can't register the same [" + this.name + "] more than once for [" + extension.getName() + "]");
}
extensions.add(extension);
}

@Override
protected void bindExtensions(Binder binder) {
Multibinder<T> allocationMultibinder = Multibinder.newSetBinder(binder, extensionClass);
for (Class<? extends T> clazz : extensions) {
binder.bind(clazz).asEagerSingleton();
allocationMultibinder.addBinding().to(clazz);
}
}

public boolean isEmpty() {
return extensions.isEmpty();
}
}

/**
* A an instance of a map, mapping one instance value to another. Both key and value are instances, not classes
* like with other extension points.
*/
public static final class InstanceMap<K, V> extends ExtensionPoint {
private final Map<K, V> map = new HashMap<>();
private final Class<K> keyType;
private final Class<V> valueType;

/**
* Creates a new {@link ClassSet}
*
* @param name the human readable underscore case name of the extension point. This is used in error messages.
* @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
*/
public InstanceMap(String name, Class<K> keyType, Class<V> valueType, Class<?>... singletons) {
super(name, singletons);
this.keyType = keyType;
this.valueType = valueType;
}

/**
* Registers a mapping from {@code key} to {@code value}
*
* @throws IllegalArgumentException iff the key is already registered
*/
public void registerExtension(K key, V value) {
V old = map.put(key, value);
if (old != null) {
throw new IllegalArgumentException("Cannot register [" + this.name + "] with key [" + key + "] to [" + value + "], already registered to [" + old + "]");
}
}

@Override
protected void bindExtensions(Binder binder) {
MapBinder<K, V> mapBinder = MapBinder.newMapBinder(binder, keyType, valueType);
for (Map.Entry<K, V> entry : map.entrySet()) {
mapBinder.addBinding(entry.getKey()).toInstance(entry.getValue());
}
}
}
}
@ -94,4 +94,9 @@ public interface XContentGenerator extends Closeable, Flushable {

void copyCurrentStructure(XContentParser parser) throws IOException;

/**
* Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output.
*/
boolean isClosed();

}

@ -419,4 +419,8 @@ public class JsonXContentGenerator implements XContentGenerator {
generator.close();
}

@Override
public boolean isClosed() {
return generator.isClosed();
}
}

@ -185,8 +185,8 @@ public class XContentMapValues {
// we want all sub properties to match as soon as an object matches

return (map) -> filter(map,
include, include.getInitialState(),
exclude, exclude.getInitialState(),
include, 0,
exclude, 0,
matchAllAutomaton);
}

@ -237,7 +237,7 @@ public class XContentMapValues {
// the object matched, so consider that the include matches every inner property
// we only care about excludes now
subIncludeAutomaton = matchAllAutomaton;
subIncludeState = includeAutomaton.getInitialState();
subIncludeState = 0;
}
}

@ -19,20 +19,6 @@

package org.elasticsearch.discovery;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.discovery.zen.ZenPing;
import org.elasticsearch.discovery.zen.ZenPingService;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.UnicastZenPing;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;

@ -41,6 +27,16 @@ import java.util.Objects;
import java.util.function.Function;
import java.util.function.Supplier;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.transport.TransportService;

/**
* A module for loading classes for node discovery.
*/

@ -52,8 +48,7 @@ public class DiscoveryModule extends AbstractModule {
new Setting<>("discovery.zen.hosts_provider", DISCOVERY_TYPE_SETTING, Function.identity(), Property.NodeScope);

private final Settings settings;
private final Map<String, Supplier<UnicastHostsProvider>> unicastHostProviders;
private final ExtensionPoint.ClassSet<ZenPing> zenPings = new ExtensionPoint.ClassSet<>("zen_ping", ZenPing.class);
private final UnicastHostsProvider hostsProvider;
private final Map<String, Class<? extends Discovery>> discoveryTypes = new HashMap<>();

public DiscoveryModule(Settings settings, TransportService transportService, NetworkService networkService,

@ -62,16 +57,30 @@ public class DiscoveryModule extends AbstractModule {
addDiscoveryType("none", NoneDiscovery.class);
addDiscoveryType("zen", ZenDiscovery.class);

Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
hostProviders.put("zen", () -> Collections::emptyList);
for (DiscoveryPlugin plugin : plugins) {
plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> {
if (hostProviders.put(entry.getKey(), entry.getValue()) != null) {
throw new IllegalArgumentException("Cannot specify zen hosts provider [" + entry.getKey() + "] twice");
}
});
String discoveryType = DISCOVERY_TYPE_SETTING.get(settings);
if (discoveryType.equals("none") == false) {
Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
hostProviders.put("zen", () -> Collections::emptyList);
for (DiscoveryPlugin plugin : plugins) {
plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> {
if (hostProviders.put(entry.getKey(), entry.getValue()) != null) {
throw new IllegalArgumentException("Cannot specify zen hosts provider [" + entry.getKey() + "] twice");
}
});
}
String hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings);
Supplier<UnicastHostsProvider> hostsProviderSupplier = hostProviders.get(hostsProviderName);
if (hostsProviderSupplier == null) {
throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName + "]");
}
hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get());
} else {
hostsProvider = null;
}
unicastHostProviders = Collections.unmodifiableMap(hostProviders);
}

public UnicastHostsProvider getHostsProvider() {
return hostsProvider;
}

/**

@ -84,10 +93,6 @@ public class DiscoveryModule extends AbstractModule {
discoveryTypes.put(type, clazz);
}

public void addZenPing(Class<? extends ZenPing> clazz) {
zenPings.registerExtension(clazz);
}

@Override
protected void configure() {
String discoveryType = DISCOVERY_TYPE_SETTING.get(settings);

@ -97,18 +102,7 @@ public class DiscoveryModule extends AbstractModule {
}

if (discoveryType.equals("none") == false) {
bind(ZenPingService.class).asEagerSingleton();
String hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings);
Supplier<UnicastHostsProvider> hostsProviderSupplier = unicastHostProviders.get(hostsProviderName);
if (hostsProviderSupplier == null) {
throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName + "]");
}
UnicastHostsProvider hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get());
bind(UnicastHostsProvider.class).toInstance(hostsProvider);
if (zenPings.isEmpty()) {
zenPings.registerExtension(UnicastZenPing.class);
}
zenPings.bind(binder());
}
bind(Discovery.class).to(discoveryClass).asEagerSingleton();
}
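The constructor above now resolves the hosts provider from DISCOVERY_HOSTS_PROVIDER_SETTING at construction time instead of in configure(). A hedged sketch of how a plugin might contribute a provider through getZenHostsProviders; the plugin class is invented, and the method signature is inferred from the call site shown in this diff:

public class ExampleDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
    @Override
    public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService,
                                                                            NetworkService networkService) {
        // selectable via: discovery.zen.hosts_provider: example
        return Collections.singletonMap("example", () -> () -> Collections.emptyList());
    }
}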
@ -19,6 +19,8 @@
|
|||
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import java.io.Closeable;
|
||||
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
|
@ -36,7 +38,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
|
|||
* A base class for {@link MasterFaultDetection} & {@link NodesFaultDetection},
|
||||
* making sure both use the same setting.
|
||||
*/
|
||||
public abstract class FaultDetection extends AbstractComponent {
|
||||
public abstract class FaultDetection extends AbstractComponent implements Closeable {
|
||||
|
||||
public static final Setting<Boolean> CONNECT_ON_NETWORK_DISCONNECT_SETTING =
|
||||
Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope);
|
||||
|
@ -80,6 +82,7 @@ public abstract class FaultDetection extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
transportService.removeConnectionListener(connectionListener);
|
||||
}
|
||||
|
|
|
@ -464,7 +464,7 @@ public class NodeJoinController extends AbstractComponent {
|
|||
}
|
||||
|
||||
private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List<DiscoveryNode> joiningNodes) {
|
||||
assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint();
|
||||
assert currentState.nodes().getMasterNodeId() == null : currentState;
|
||||
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes());
|
||||
nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId());
|
||||
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks())
|
||||
|
|
|
@ -19,6 +19,26 @@
|
|||
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

@ -30,8 +50,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;

@ -56,34 +75,13 @@ import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
import static org.elasticsearch.discovery.zen.ZenPing.PingResponse.readPingResponse;

public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing {
public class UnicastZenPing extends AbstractComponent implements ZenPing {

public static final String ACTION_NAME = "internal:discovery/zen/unicast";
public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING =

@ -125,15 +123,13 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin

private volatile boolean closed = false;

@Inject
public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
UnicastHostsProvider unicastHostsProviders) {
UnicastHostsProvider unicastHostsProvider) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);

this.hostsProvider = unicastHostsProviders;
this.hostsProvider = unicastHostsProvider;

this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);

@ -190,26 +186,14 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}

@Override
protected void doStart() {
}

@Override
protected void doStop() {
}

@Override
protected void doClose() {
public void close() throws IOException {
ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
try {
IOUtils.close(receivedResponses.values());
} catch (IOException e) {
throw new ElasticsearchException("Error wile closing send ping handlers", e);
}
IOUtils.close(receivedResponses.values());
closed = true;
}

@Override
public void setPingContextProvider(PingContextProvider contextProvider) {
public void start(PingContextProvider contextProvider) {
this.contextProvider = contextProvider;
}

@ -501,9 +485,6 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}

private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) {
if (!lifecycle.started()) {
throw new IllegalStateException("received ping request while not started");
}
temporalResponses.add(request.pingResponse);
threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() {
@Override

@ -22,6 +22,7 @@ package org.elasticsearch.discovery.zen;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;

@ -67,6 +68,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

@ -105,7 +107,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private AllocationService allocationService;
private final ClusterName clusterName;
private final DiscoverySettings discoverySettings;
private final ZenPingService pingService;
private final ZenPing zenPing;
private final MasterFaultDetection masterFD;
private final NodesFaultDetection nodesFD;
private final PublishClusterStateAction publishClusterState;

@ -137,18 +139,16 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;

@Inject
public ZenDiscovery(Settings settings, ThreadPool threadPool,
TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings,
ZenPingService pingService) {
public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, ClusterSettings clusterSettings, ZenPing zenPing) {
super(settings);
this.clusterService = clusterService;
this.clusterName = clusterService.getClusterName();
this.transportService = transportService;
this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
this.pingService = pingService;
this.zenPing = zenPing;
this.electMaster = new ElectMasterService(settings);
this.pingTimeout = PING_TIMEOUT_SETTING.get(settings);

this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings);
this.joinRetryAttempts = JOIN_RETRY_ATTEMPTS_SETTING.get(settings);
this.joinRetryDelay = JOIN_RETRY_DELAY_SETTING.get(settings);

@ -171,7 +171,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterService);
this.masterFD.addListener(new MasterNodeFailureListener());

this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, clusterService.getClusterName());
this.nodesFD.addListener(new NodeFaultDetectionListener());

@ -183,9 +182,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
new NewPendingClusterStateListener(),
discoverySettings,
clusterService.getClusterName());
this.pingService.setPingContextProvider(this);
this.membership = new MembershipAction(settings, transportService, this, new MembershipListener());

this.joinThreadControl = new JoinThreadControl(threadPool);

transportService.registerRequestHandler(

@ -201,7 +198,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
protected void doStart() {
nodesFD.setLocalNode(clusterService.localNode());
joinThreadControl.start();
pingService.start();
zenPing.start(this);
this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings);
this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::rejoin, logger);
}

@ -233,7 +230,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
@Override
protected void doStop() {
joinThreadControl.stop();
pingService.stop();
masterFD.stop("zen disco stop");
nodesFD.stop();
DiscoveryNodes nodes = nodes();

@ -264,10 +260,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}

@Override
protected void doClose() {
masterFD.close();
nodesFD.close();
pingService.close();
protected void doClose() throws IOException {
IOUtils.close(masterFD, nodesFD, zenPing);
}

@Override

@ -322,6 +316,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

// update the set of nodes to ping after the new cluster state has been published
nodesFD.updateNodesAndPing(clusterChangedEvent.state());

// clean the pending cluster queue - we are currently master, so any pending cluster state should be failed
// note that we also clean the queue on master failure (see handleMasterGone) but a delayed cluster state publish
// from a stale master can still make it in the queue during the election (but not be committed)
publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("elected as master"));
}

/**

@ -362,6 +361,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
return publishClusterState.pendingStatesQueue().pendingClusterStates();
}

PendingClusterStatesQueue pendingClusterStatesQueue() {
return publishClusterState.pendingStatesQueue();
}

/**
* the main function of a join thread. This function is guaranteed to join the cluster
* or spawn a new join thread upon failure to do so.

@ -862,7 +865,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover

private DiscoveryNode findMaster() {
logger.trace("starting to ping");
List<ZenPing.PingResponse> fullPingResponses = pingService.pingAndWait(pingTimeout).toList();
List<ZenPing.PingResponse> fullPingResponses = pingAndWait(pingTimeout).toList();
if (fullPingResponses == null) {
logger.trace("No full ping responses");
return null;

@ -1004,6 +1007,28 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
}

private ZenPing.PingCollection pingAndWait(TimeValue timeout) {
final ZenPing.PingCollection response = new ZenPing.PingCollection();
final CountDownLatch latch = new CountDownLatch(1);
try {
zenPing.ping(pings -> {
response.addPings(pings);
latch.countDown();
}, timeout);
} catch (Exception ex) {
logger.warn("Ping execution failed", ex);
latch.countDown();
}

try {
latch.await();
return response;
} catch (InterruptedException e) {
logger.trace("pingAndWait interrupted");
return response;
}
}

private class NewPendingClusterStateListener implements PublishClusterStateAction.NewPendingClusterStateListener {

@Override

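The new pingAndWait above drives a single ZenPing directly and blocks on a CountDownLatch until the asynchronous listener fires (or the ping call throws), instead of delegating to the removed ZenPingService. Below is a minimal standalone sketch of that latch pattern; the AsyncPinger interface and node names are hypothetical stand-ins, not the Elasticsearch types.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;

final class PingAndWaitSketch {

    interface AsyncPinger {
        // invokes the callback once with whatever responses arrived before the timeout
        void ping(Consumer<List<String>> onResponses, long timeoutMillis);
    }

    // Same shape as the pingAndWait in the diff: start one async ping, block on a latch,
    // and return whatever was collected even if the waiting thread is interrupted.
    static List<String> pingAndWait(AsyncPinger pinger, long timeoutMillis) {
        final List<String> collected = new ArrayList<>();
        final CountDownLatch latch = new CountDownLatch(1);
        try {
            pinger.ping(responses -> {
                collected.addAll(responses);
                latch.countDown();
            }, timeoutMillis);
        } catch (Exception ex) {
            latch.countDown();  // a failed ping must still release the waiter
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();  // preserve the interrupt, return what we have
        }
        return collected;
    }

    public static void main(String[] args) {
        AsyncPinger pinger = (cb, timeout) -> cb.accept(List.of("node-1", "node-2"));
        System.out.println(pingAndWait(pinger, 1000));  // [node-1, node-2]
    }
}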
@ -19,15 +19,7 @@

package org.elasticsearch.discovery.zen;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;

@ -36,11 +28,19 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;

import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;

public interface ZenPing extends LifecycleComponent {
public interface ZenPing extends Closeable {

void setPingContextProvider(PingContextProvider contextProvider);
void start(PingContextProvider contextProvider);

void ping(PingListener listener, TimeValue timeout);

@ -1,105 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.discovery.zen;

import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class ZenPingService extends AbstractLifecycleComponent {

private List<ZenPing> zenPings = Collections.emptyList();

@Inject
public ZenPingService(Settings settings, Set<ZenPing> zenPings) {
super(settings);
this.zenPings = Collections.unmodifiableList(new ArrayList<>(zenPings));
}

public List<ZenPing> zenPings() {
return this.zenPings;
}

public void setPingContextProvider(PingContextProvider contextProvider) {
if (lifecycle.started()) {
throw new IllegalStateException("Can't set nodes provider when started");
}
for (ZenPing zenPing : zenPings) {
zenPing.setPingContextProvider(contextProvider);
}
}

@Override
protected void doStart() {
for (ZenPing zenPing : zenPings) {
zenPing.start();
}
}

@Override
protected void doStop() {
for (ZenPing zenPing : zenPings) {
zenPing.stop();
}
}

@Override
protected void doClose() {
for (ZenPing zenPing : zenPings) {
zenPing.close();
}
}

public ZenPing.PingCollection pingAndWait(TimeValue timeout) {
final ZenPing.PingCollection response = new ZenPing.PingCollection();
final CountDownLatch latch = new CountDownLatch(zenPings.size());
for (ZenPing zenPing : zenPings) {
final AtomicBoolean counted = new AtomicBoolean();
try {
zenPing.ping(pings -> {
response.addPings(pings);
if (counted.compareAndSet(false, true)) {
latch.countDown();
}
}, timeout);
} catch (Exception ex) {
logger.warn("Ping execution failed", ex);
if (counted.compareAndSet(false, true)) {
latch.countDown();
}
}
}
try {
latch.await();
return response;
} catch (InterruptedException e) {
logger.trace("pingAndWait interrupted");
return response;
}
}
}

@ -52,6 +52,7 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.IndexingOperationListener;
import org.elasticsearch.index.shard.SearchOperationListener;
import org.elasticsearch.index.shard.ShadowIndexShard;

@ -144,7 +145,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
this.indexAnalyzers = registry.build(indexSettings);
this.similarityService = similarityService;
this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry,
IndexService.this::newQueryShardContext);
// we parse all percolator queries as they would be parsed on shard 0
() -> newQueryShardContext(0, null, () -> {
throw new IllegalArgumentException("Percolator queries are not allowed to use the curent timestamp");
}));
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService);
this.shardStoreDeleter = shardStoreDeleter;
this.bigArrays = bigArrays;

@ -452,7 +456,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust

/**
* Creates a new QueryShardContext. The context has not types set yet, if types are required set them via
* {@link QueryShardContext#setTypes(String...)}
* {@link QueryShardContext#setTypes(String...)}.
*
* Passing a {@code null} {@link IndexReader} will return a valid context, however it won't be able to make
* {@link IndexReader}-specific optimizations, such as rewriting containing range queries.
*/
public QueryShardContext newQueryShardContext(int shardId, IndexReader indexReader, LongSupplier nowInMillis) {
return new QueryShardContext(

@ -463,15 +470,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
nowInMillis);
}

/**
* Creates a new QueryShardContext. The context has not types set yet, if types are required set them via
* {@link QueryShardContext#setTypes(String...)}. This context may be used for query parsing but cannot be
* used for rewriting since it does not know about the current {@link IndexReader}.
*/
public QueryShardContext newQueryShardContext() {
return newQueryShardContext(0, null, System::currentTimeMillis);
}

/**
* The {@link ThreadPool} to use for this index.
*/

@ -692,7 +690,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
if (shard.isRefreshNeeded()) {
shard.refresh("schedule");
}
} catch (EngineClosedException | AlreadyClosedException ex) {
} catch (IndexShardClosedException | AlreadyClosedException ex) {
// fine - continue;
}
continue;

@ -133,23 +133,20 @@ public final class IndexingSlowLog implements IndexingOperationListener {
this.reformat = reformat;
}

@Override
public void postIndex(Engine.Index index, boolean created) {
final long took = index.endTime() - index.startTime();
postIndexing(index.parsedDoc(), took);
}

private void postIndexing(ParsedDocument doc, long tookInNanos) {
if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) {
indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) {
indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) {
indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
public void postIndex(Engine.Index indexOperation, Engine.IndexResult result) {
if (result.hasFailure() == false) {
final ParsedDocument doc = indexOperation.parsedDoc();
final long tookInNanos = result.getTook();
if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) {
indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) {
indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
} else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) {
indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog));
}
}
}

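The reworked postIndex above takes the took-time from the Engine.IndexResult and only slow-logs successful operations, cascading from warn to trace by threshold. A minimal sketch of that cascade follows; the thresholds and the print-based "logger" are illustrative, not the IndexingSlowLog API.

import java.util.concurrent.TimeUnit;

final class SlowLogSketch {
    private final long warnNanos;
    private final long infoNanos;

    SlowLogSketch(long warnMillis, long infoMillis) {
        this.warnNanos = TimeUnit.MILLISECONDS.toNanos(warnMillis);
        this.infoNanos = TimeUnit.MILLISECONDS.toNanos(infoMillis);
    }

    // mirrors the shape of postIndex in the diff: skip failures, then pick a level by elapsed time
    void postIndex(boolean failed, long tookInNanos) {
        if (failed) {
            return;  // only successful operations are slow-logged
        }
        if (warnNanos >= 0 && tookInNanos > warnNanos) {
            System.out.println("WARN slow indexing took " + tookInNanos + "ns");
        } else if (infoNanos >= 0 && tookInNanos > infoNanos) {
            System.out.println("INFO slow indexing took " + tookInNanos + "ns");
        }
    }

    public static void main(String[] args) {
        new SlowLogSketch(10, 1).postIndex(false, TimeUnit.MILLISECONDS.toNanos(5));  // logs at INFO
    }
}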
@ -24,12 +24,13 @@ import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;

/**
* Deprecated as not used in 6.0, should be removed in 7.0
* Still exists for bwc in serializing/deserializing from
* 5.x nodes
*/
@Deprecated
public class DeleteFailedEngineException extends EngineException {

public DeleteFailedEngineException(ShardId shardId, Engine.Delete delete, Throwable cause) {
super(shardId, "Delete failed for [" + delete.uid().text() + "]", cause);
}

public DeleteFailedEngineException(StreamInput in) throws IOException{
super(in);
}

@ -48,4 +48,12 @@ class DeleteVersionValue extends VersionValue {
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED;
}

@Override
public String toString() {
return "DeleteVersionValue{" +
"version=" + version() + ", " +
"time=" + time +
'}';
}
}

@ -77,6 +77,7 @@ import java.util.Base64;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;

@ -277,9 +278,135 @@ public abstract class Engine implements Closeable {
}
}

public abstract void index(Index operation) throws EngineException;
/**
* Perform document index operation on the engine
* @param index operation to perform
* @return {@link IndexResult} containing updated translog location, version and
* document specific failures
*
* Note: engine level failures (i.e. persistent engine failures) are thrown
*/
public abstract IndexResult index(final Index index);

public abstract void delete(Delete delete) throws EngineException;
/**
* Perform document delete operation on the engine
* @param delete operation to perform
* @return {@link DeleteResult} containing updated translog location, version and
* document specific failures
*
* Note: engine level failures (i.e. persistent engine failures) are thrown
*/
public abstract DeleteResult delete(final Delete delete);

/**
* Base class for index and delete operation results
* Holds result meta data (e.g. translog location, updated version)
* for an executed write {@link Operation}
**/
public abstract static class Result {
private final Operation.TYPE operationType;
private final long version;
private final Exception failure;
private final SetOnce<Boolean> freeze = new SetOnce<>();
private Translog.Location translogLocation;
private long took;

protected Result(Operation.TYPE operationType, Exception failure, long version) {
this.operationType = operationType;
this.failure = failure;
this.version = version;
}

protected Result(Operation.TYPE operationType, long version) {
this(operationType, null, version);
}

/** whether the operation had failure */
public boolean hasFailure() {
return failure != null;
}

/** get the updated document version */
public long getVersion() {
return version;
}

/** get the translog location after executing the operation */
public Translog.Location getTranslogLocation() {
return translogLocation;
}

/** get document failure while executing the operation {@code null} in case of no failure */
public Exception getFailure() {
return failure;
}

/** get total time in nanoseconds */
public long getTook() {
return took;
}

public Operation.TYPE getOperationType() {
return operationType;
}

void setTranslogLocation(Translog.Location translogLocation) {
if (freeze.get() == null) {
assert failure == null : "failure has to be null to set translog location";
this.translogLocation = translogLocation;
} else {
throw new IllegalStateException("result is already frozen");
}
}

void setTook(long took) {
if (freeze.get() == null) {
this.took = took;
} else {
throw new IllegalStateException("result is already frozen");
}
}

void freeze() {
freeze.set(true);
}
}

public static class IndexResult extends Result {
private final boolean created;

public IndexResult(long version, boolean created) {
super(Operation.TYPE.INDEX, version);
this.created = created;
}

public IndexResult(Exception failure, long version) {
super(Operation.TYPE.INDEX, failure, version);
this.created = false;
}

public boolean isCreated() {
return created;
}
}

public static class DeleteResult extends Result {
private final boolean found;

public DeleteResult(long version, boolean found) {
super(Operation.TYPE.DELETE, version);
this.found = found;
}

public DeleteResult(Exception failure, long version) {
super(Operation.TYPE.DELETE, failure, version);
this.found = false;
}

public boolean isFound() {
return found;
}
}

/**
* Attempts to do a special commit where the given syncID is put into the commit data. The attempt

@ -767,13 +894,27 @@ public abstract class Engine implements Closeable {
}

public abstract static class Operation {

/** type of operation (index, delete), subclasses use static types */
public enum TYPE {
INDEX, DELETE;

private final String lowercase;

TYPE() {
this.lowercase = this.toString().toLowerCase(Locale.ROOT);
}

public String getLowercase() {
return lowercase;
}
}

private final Term uid;
private long version;
private final long version;
private final VersionType versionType;
private final Origin origin;
private Translog.Location location;
private final long startTime;
private long endTime;

public Operation(Term uid, long version, VersionType versionType, Origin origin, long startTime) {
this.uid = uid;

@ -806,27 +947,7 @@ public abstract class Engine implements Closeable {
return this.version;
}

public void updateVersion(long version) {
this.version = version;
}

public void setTranslogLocation(Translog.Location location) {
this.location = location;
}

public Translog.Location getTranslogLocation() {
return this.location;
}

public int sizeInBytes() {
if (location != null) {
return location.size;
} else {
return estimatedSizeInBytes();
}
}

protected abstract int estimatedSizeInBytes();
public abstract int estimatedSizeInBytes();

public VersionType versionType() {
return this.versionType;

@ -839,20 +960,11 @@ public abstract class Engine implements Closeable {
return this.startTime;
}

public void endTime(long endTime) {
this.endTime = endTime;
}

/**
* Returns operation end time in nanoseconds.
*/
public long endTime() {
return this.endTime;
}

abstract String type();
public abstract String type();

abstract String id();

abstract TYPE operationType();
}

public static class Index extends Operation {

@ -860,7 +972,6 @@ public abstract class Engine implements Closeable {
private final ParsedDocument doc;
private final long autoGeneratedIdTimestamp;
private final boolean isRetry;
private boolean created;

public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime,
long autoGeneratedIdTimestamp, boolean isRetry) {

@ -892,6 +1003,11 @@ public abstract class Engine implements Closeable {
return this.doc.id();
}

@Override
TYPE operationType() {
return TYPE.INDEX;
}

public String routing() {
return this.doc.routing();
}

@ -904,12 +1020,6 @@ public abstract class Engine implements Closeable {
return this.doc.ttl();
}

@Override
public void updateVersion(long version) {
super.updateVersion(version);
this.doc.version().setLongValue(version);
}

public String parent() {
return this.doc.parent();
}

@ -922,16 +1032,8 @@ public abstract class Engine implements Closeable {
return this.doc.source();
}

public boolean isCreated() {
return created;
}

public void setCreated(boolean created) {
this.created = created;
}

@Override
protected int estimatedSizeInBytes() {
public int estimatedSizeInBytes() {
return (id().length() + type().length()) * 2 + source().length() + 12;
}

@ -958,21 +1060,19 @@ public abstract class Engine implements Closeable {

private final String type;
private final String id;
private boolean found;

public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime, boolean found) {
public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime) {
super(uid, version, versionType, origin, startTime);
this.type = type;
this.id = id;
this.found = found;
}

public Delete(String type, String id, Term uid) {
this(type, id, uid, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), false);
this(type, id, uid, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
}

public Delete(Delete template, VersionType versionType) {
this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime(), template.found());
this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime());
}

@Override

@ -985,20 +1085,15 @@ public abstract class Engine implements Closeable {
return this.id;
}

public void updateVersion(long version, boolean found) {
updateVersion(version);
this.found = found;
}

public boolean found() {
return this.found;
@Override
TYPE operationType() {
return TYPE.DELETE;
}

@Override
protected int estimatedSizeInBytes() {
public int estimatedSizeInBytes() {
return (uid().field().length() + uid().text().length()) * 2 + 20;
}

}

public static class Get {

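The new Engine.Result hierarchy above turns index and delete from void methods into result objects that carry the updated version, translog location, took time and an optional per-document failure, and it uses a SetOnce-backed freeze() so the engine cannot mutate a result after handing it out. Below is a minimal sketch of that write-once result idea; it uses a plain volatile flag instead of Lucene's SetOnce and hypothetical field names.

final class WriteResult {
    private final long version;
    private final Exception failure;     // null when the operation succeeded
    private long tookInNanos;
    private volatile boolean frozen;

    WriteResult(long version, Exception failure) {
        this.version = version;
        this.failure = failure;
    }

    boolean hasFailure() { return failure != null; }
    long getVersion() { return version; }
    long getTook() { return tookInNanos; }

    // mutators are only legal while the engine is still filling in the result
    void setTook(long tookInNanos) {
        if (frozen) {
            throw new IllegalStateException("result is already frozen");
        }
        this.tookInNanos = tookInNanos;
    }

    // after freeze() the result can be handed to callers without further mutation
    void freeze() { frozen = true; }
}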
@ -26,20 +26,18 @@ import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.Objects;

/**
* Deprecated as not used in 6.0, should be removed in 7.0
* Still exists for bwc in serializing/deserializing from
* 5.x nodes
*/
@Deprecated
public class IndexFailedEngineException extends EngineException {

private final String type;

private final String id;

public IndexFailedEngineException(ShardId shardId, String type, String id, Throwable cause) {
super(shardId, "Index failed for [" + type + "#" + id + "]", cause);
Objects.requireNonNull(type, "type must not be null");
Objects.requireNonNull(id, "id must not be null");
this.type = type;
this.id = id;
}

public IndexFailedEngineException(StreamInput in) throws IOException{
super(in);
type = in.readString();

@ -35,6 +35,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;

@ -57,6 +58,7 @@ import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.merge.OnGoingMerge;

@ -386,46 +388,61 @@ public class InternalEngine extends Engine {
return currentVersion;
}

private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u);

@FunctionalInterface
private interface VersionValueSupplier {
VersionValue apply(long updatedVersion, long time);
}

private <T extends Engine.Operation> void maybeAddToTranslog(
final T op,
final long updatedVersion,
final Function<T, Translog.Operation> toTranslogOp,
final VersionValueSupplier toVersionValue) throws IOException {
if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location translogLocation = translog.add(toTranslogOp.apply(op));
op.setTranslogLocation(translogLocation);
}
versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));

}

@Override
public void index(Index index) {
public IndexResult index(Index index) {
IndexResult result;
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
if (index.origin().isRecovery()) {
// Don't throttle recovery operations
innerIndex(index);
result = innerIndex(index);
} else {
try (Releasable r = throttle.acquireThrottle()) {
innerIndex(index);
result = innerIndex(index);
}
}
} catch (IllegalStateException | IOException e) {
try {
maybeFailEngine("index", e);
} catch (Exception inner) {
e.addSuppressed(inner);
}
throw new IndexFailedEngineException(shardId, index.type(), index.id(), e);
} catch (Exception e) {
result = new IndexResult(checkIfDocumentFailureOrThrow(index, e), index.version());
}
return result;
}

/**
* Inspects exception thrown when executing index or delete operations
*
* @return failure if the failure is a document specific failure (e.g. analysis chain failure)
* or throws Exception if the failure caused the engine to fail (e.g. out of disk, lucene tragic event)
*
* Note: pkg-private for testing
*/
final Exception checkIfDocumentFailureOrThrow(final Operation operation, final Exception failure) {
boolean isDocumentFailure;
try {
// When indexing a document into Lucene, Lucene distinguishes between environment related errors
// (like out of disk space) and document specific errors (like analysis chain problems) by setting
// the IndexWriter.getTragicEvent() value for the former. maybeFailEngine checks for these kind of
// errors and returns true if that is the case. We use that to indicate a document level failure
// and set the error in operation.setFailure. In case of environment related errors, the failure
// is bubbled up
isDocumentFailure = maybeFailEngine(operation.operationType().getLowercase(), failure) == false;
} catch (Exception inner) {
// we failed checking whether the failure can fail the engine, treat it as a persistent engine failure
isDocumentFailure = false;
failure.addSuppressed(inner);
}
if (isDocumentFailure) {
return failure;
} else {
// throw original exception in case the exception caused the engine to fail
rethrow(failure);
return null;
}
}

// hack to rethrow original exception in case of engine level failures during index/delete operation
@SuppressWarnings("unchecked")
private static <T extends Throwable> void rethrow(Throwable t) throws T {
throw (T) t;
}

private boolean canOptimizeAddDocument(Index index) {

@ -452,7 +469,9 @@ public class InternalEngine extends Engine {
return false;
}

private void innerIndex(Index index) throws IOException {
private IndexResult innerIndex(Index index) throws IOException {
final Translog.Location location;
final long updatedVersion;
try (Releasable ignored = acquireLock(index.uid())) {
lastWriteNanos = index.startTime();
/* if we have an autoGeneratedID that comes into the engine we can potentially optimize

@ -484,7 +503,8 @@ public class InternalEngine extends Engine {
// if anything is fishy here ie. there is a retry we go and force updateDocument below so we are updating the document in the
// lucene index without checking the version map but we still do the version check
final boolean forceUpdateDocument;
if (canOptimizeAddDocument(index)) {
final boolean canOptimizeAddDocument = canOptimizeAddDocument(index);
if (canOptimizeAddDocument) {
long deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get();
if (index.isRetry()) {
forceUpdateDocument = true;

@ -516,60 +536,81 @@ public class InternalEngine extends Engine {
}
}
final long expectedVersion = index.version();
final IndexResult indexResult;
if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) {
index.setCreated(false);
return;
}
final long updatedVersion = updateVersion(index, currentVersion, expectedVersion);
index.setCreated(deleted);
if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
// document does not exists, we can optimize for create
index(index, indexWriter);
// skip index operation because of version conflict on recovery
indexResult = new IndexResult(expectedVersion, false);
} else {
update(index, indexWriter);
updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
index.parsedDoc().version().setLongValue(updatedVersion);
if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
// document does not exists, we can optimize for create, but double check if assertions are running
assert assertDocDoesNotExist(index, canOptimizeAddDocument == false);
index(index.docs(), indexWriter);
} else {
update(index.uid(), index.docs(), indexWriter);
}
indexResult = new IndexResult(updatedVersion, deleted);
location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
? translog.add(new Translog.Index(index, indexResult))
: null;
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion));
indexResult.setTranslogLocation(location);
}
maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE);
indexResult.setTook(System.nanoTime() - index.startTime());
indexResult.freeze();
return indexResult;
}
}

private long updateVersion(Engine.Operation op, long currentVersion, long expectedVersion) {
final long updatedVersion = op.versionType().updateVersion(currentVersion, expectedVersion);
op.updateVersion(updatedVersion);
return updatedVersion;
}

private static void index(final Index index, final IndexWriter indexWriter) throws IOException {
if (index.docs().size() > 1) {
indexWriter.addDocuments(index.docs());
private static void index(final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException {
if (docs.size() > 1) {
indexWriter.addDocuments(docs);
} else {
indexWriter.addDocument(index.docs().get(0));
indexWriter.addDocument(docs.get(0));
}
}

private static void update(final Index index, final IndexWriter indexWriter) throws IOException {
if (index.docs().size() > 1) {
indexWriter.updateDocuments(index.uid(), index.docs());
/**
* Asserts that the doc in the index operation really doesn't exist
*/
private boolean assertDocDoesNotExist(final Index index, final boolean allowDeleted) throws IOException {
final VersionValue versionValue = versionMap.getUnderLock(index.uid());
if (versionValue != null) {
if (versionValue.delete() == false || allowDeleted == false) {
throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + versionValue + ")");
}
} else {
indexWriter.updateDocument(index.uid(), index.docs().get(0));
try (final Searcher searcher = acquireSearcher("assert doc doesn't exist")) {
final long docsWithId = searcher.searcher().count(new TermQuery(index.uid()));
if (docsWithId > 0) {
throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + "] times in index");
}
}
}
return true;
}

private static void update(final Term uid, final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException {
if (docs.size() > 1) {
indexWriter.updateDocuments(uid, docs);
} else {
indexWriter.updateDocument(uid, docs.get(0));
}
}

@Override
public void delete(Delete delete) throws EngineException {
public DeleteResult delete(Delete delete) {
DeleteResult result;
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
// NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
innerDelete(delete);
} catch (IllegalStateException | IOException e) {
try {
maybeFailEngine("delete", e);
} catch (Exception inner) {
e.addSuppressed(inner);
}
throw new DeleteFailedEngineException(shardId, delete, e);
result = innerDelete(delete);
} catch (Exception e) {
result = new DeleteResult(checkIfDocumentFailureOrThrow(delete, e), delete.version());
}

maybePruneDeletedTombstones();
return result;
}

private void maybePruneDeletedTombstones() {

@ -580,7 +621,10 @@ public class InternalEngine extends Engine {
}
}

private void innerDelete(Delete delete) throws IOException {
private DeleteResult innerDelete(Delete delete) throws IOException {
final Translog.Location location;
final long updatedVersion;
final boolean found;
try (Releasable ignored = acquireLock(delete.uid())) {
lastWriteNanos = delete.startTime();
final long currentVersion;

@ -596,19 +640,28 @@ public class InternalEngine extends Engine {
}

final long expectedVersion = delete.version();
if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) return;

final long updatedVersion = updateVersion(delete, currentVersion, expectedVersion);

final boolean found = deleteIfFound(delete, currentVersion, deleted, versionValue);

delete.updateVersion(updatedVersion, found);

maybeAddToTranslog(delete, updatedVersion, Translog.Delete::new, DeleteVersionValue::new);
final DeleteResult deleteResult;
if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) {
// skip executing delete because of version conflict on recovery
deleteResult = new DeleteResult(expectedVersion, true);
} else {
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue);
deleteResult = new DeleteResult(updatedVersion, found);
location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
? translog.add(new Translog.Delete(delete, deleteResult))
: null;
versionMap.putUnderLock(delete.uid().bytes(),
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
deleteResult.setTranslogLocation(location);
}
deleteResult.setTook(System.nanoTime() - delete.startTime());
deleteResult.freeze();
return deleteResult;
}
}

private boolean deleteIfFound(Delete delete, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException {
private boolean deleteIfFound(Term uid, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException {
final boolean found;
if (currentVersion == Versions.NOT_FOUND) {
// doc does not exist and no prior deletes

@ -618,7 +671,7 @@ public class InternalEngine extends Engine {
found = false;
} else {
// we deleted a currently existing document
indexWriter.deleteDocuments(delete.uid());
indexWriter.deleteDocuments(uid);
found = true;
}
return found;

@ -1086,7 +1139,8 @@ public class InternalEngine extends Engine {
}
}

private IndexWriter createWriter(boolean create) throws IOException {
// pkg-private for testing
IndexWriter createWriter(boolean create) throws IOException {
try {
final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
iwc.setCommitOnClose(false); // we by default don't commit on close

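The checkIfDocumentFailureOrThrow and rethrow methods above distinguish per-document failures (returned inside the IndexResult/DeleteResult) from engine-fatal ones (rethrown unchanged), using a generic "sneaky throw" so the original checked exception can escape without wrapping. A small standalone sketch of that pattern follows; isEngineFatal is a hypothetical stand-in for the maybeFailEngine check, not the Elasticsearch API.

final class FailureClassifier {

    // stand-in for maybeFailEngine: e.g. out-of-disk or a Lucene tragic event is fatal,
    // while a document-level problem (analysis chain failure) is not
    static boolean isEngineFatal(Exception e) {
        return e instanceof java.io.IOException;
    }

    static Exception documentFailureOrThrow(Exception failure) {
        if (isEngineFatal(failure)) {
            rethrow(failure);   // bubble the original exception up unchanged
        }
        return failure;         // document-specific: report it in the result instead
    }

    // the unchecked-rethrow trick: T is inferred as RuntimeException at the call site,
    // so a checked exception escapes without being declared or wrapped
    @SuppressWarnings("unchecked")
    private static <T extends Throwable> void rethrow(Throwable t) throws T {
        throw (T) t;
    }

    public static void main(String[] args) {
        Exception docProblem = new IllegalArgumentException("analysis chain failed");
        System.out.println(documentFailureOrThrow(docProblem));   // returned, not thrown
    }
}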
@ -106,12 +106,12 @@ public class ShadowEngine extends Engine {

@Override
public void index(Index index) throws EngineException {
public IndexResult index(Index index) {
throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine");
}

@Override
public void delete(Delete delete) throws EngineException {
public DeleteResult delete(Delete delete) {
throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine");
}

@ -57,4 +57,11 @@ class VersionValue implements Accountable {
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}

@Override
public String toString() {
return "VersionValue{" +
"version=" + version +
'}';
}
}

@ -69,7 +69,7 @@ public class DocumentMapperParser {
}

public Mapper.TypeParser.ParserContext parserContext(String type) {
return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get());
return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier);
}

public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException {

@ -20,7 +20,6 @@
package org.elasticsearch.index.mapper;

import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;

@ -31,6 +30,7 @@ import org.elasticsearch.index.similarity.SimilarityProvider;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Supplier;

public abstract class Mapper implements ToXContent, Iterable<Mapper> {

@ -93,11 +93,13 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

private final ParseFieldMatcher parseFieldMatcher;

private final QueryShardContext queryShardContext;
private final Supplier<QueryShardContext> queryShardContextSupplier;
private QueryShardContext queryShardContext;

public ParserContext(String type, IndexAnalyzers indexAnalyzers, Function<String, SimilarityProvider> similarityLookupService,
MapperService mapperService, Function<String, TypeParser> typeParsers,
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) {
Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher,
Supplier<QueryShardContext> queryShardContextSupplier) {
this.type = type;
this.indexAnalyzers = indexAnalyzers;
this.similarityLookupService = similarityLookupService;

@ -105,7 +107,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
this.typeParsers = typeParsers;
this.indexVersionCreated = indexVersionCreated;
this.parseFieldMatcher = parseFieldMatcher;
this.queryShardContext = queryShardContext;
this.queryShardContextSupplier = queryShardContextSupplier;
}

public String type() {

@ -137,6 +139,10 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
}

public QueryShardContext queryShardContext() {
// No need for synchronization, this class must be used in a single thread
if (queryShardContext == null) {
queryShardContext = queryShardContextSupplier.get();
}
return queryShardContext;
}

@ -155,7 +161,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

static class MultiFieldParserContext extends ParserContext {
MultiFieldParserContext(ParserContext in) {
super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext());
super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in::queryShardContext);
}
}

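ParserContext now takes a Supplier<QueryShardContext> and builds the context lazily on first access, explicitly without synchronization because the class is used on a single thread. The sketch below isolates that memoization idea as a tiny generic helper; the class and its use of String values are illustrative only.

import java.util.function.Supplier;

// single-threaded lazy memoization, matching the "used in a single thread" assumption above
final class Lazy<T> implements Supplier<T> {
    private final Supplier<T> factory;
    private T value;

    Lazy(Supplier<T> factory) {
        this.factory = factory;
    }

    @Override
    public T get() {
        if (value == null) {
            value = factory.get();  // the first caller pays the construction cost
        }
        return value;
    }

    public static void main(String[] args) {
        Lazy<String> context = new Lazy<>(() -> {
            System.out.println("building once");
            return "query-shard-context";
        });
        context.get();
        context.get();  // "building once" is printed a single time
    }
}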
@ -54,7 +54,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.function.Supplier;
import java.util.stream.Collectors;

@ -28,6 +28,8 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;

@ -49,6 +51,8 @@ import java.util.Optional;
public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> implements MultiTermQueryBuilder {
public static final String NAME = "fuzzy";

private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(FuzzyQueryBuilder.class));

/** Default maximum edit distance. Defaults to AUTO. */
public static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO;

@ -151,6 +155,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
* @param value The value of the term
*/
public FuzzyQueryBuilder(String fieldName, Object value) {
DEPRECATION_LOGGER.deprecated("{} query is deprecated. Instead use the [match] query with fuzziness parameter", NAME);
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("field name cannot be null or empty");
}

@ -67,7 +67,7 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier {
/**
* Returns a clients to fetch resources from local or remove nodes.
*/
public final Client getClient() {
public Client getClient() {
return client;
}

@ -86,7 +86,9 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier {
return mapperService;
}

/** Return the current {@link IndexReader}, or {@code null} if we are on the coordinating node. */
/** Return the current {@link IndexReader}, or {@code null} if no index reader is available, for
* instance if we are on the coordinating node or if this rewrite context is used to index
* queries (percolation). */
public IndexReader getIndexReader() {
return reader;
}

@ -421,4 +421,9 @@ public class QueryShardContext extends QueryRewriteContext {
return super.nowInMillis();
}

@Override
public Client getClient() {
failIfFrozen(); // we somebody uses a terms filter with lookup for instance can't be cached...
return super.getClient();
}
}

@ -26,6 +26,7 @@ import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;

@ -59,9 +60,10 @@ import java.util.TreeMap;
public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> {
public static final String NAME = "query_string";

public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);

public static final boolean DEFAULT_AUTO_GENERATE_PHRASE_QUERIES = false;
public static final int DEFAULT_MAX_DETERMINED_STATES = Operations.DEFAULT_MAX_DETERMINIZED_STATES;
public static final boolean DEFAULT_LOWERCASE_EXPANDED_TERMS = true;
public static final boolean DEFAULT_ENABLE_POSITION_INCREMENTS = true;
public static final boolean DEFAULT_ESCAPE = false;
public static final boolean DEFAULT_USE_DIS_MAX = true;

@ -71,7 +73,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
public static final float DEFAULT_TIE_BREAKER = 0.0f;
public static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO;
public static final Operator DEFAULT_OPERATOR = Operator.OR;
public static final Locale DEFAULT_LOCALE = Locale.ROOT;
public static final boolean DEFAULT_SPLIT_ON_WHITESPACE = true;

private static final ParseField QUERY_FIELD = new ParseField("query");
private static final ParseField FIELDS_FIELD = new ParseField("fields");

@ -82,7 +84,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
private static final ParseField ALLOW_LEADING_WILDCARD_FIELD = new ParseField("allow_leading_wildcard");
private static final ParseField AUTO_GENERATE_PHRASE_QUERIES_FIELD = new ParseField("auto_generate_phrase_queries");
private static final ParseField MAX_DETERMINED_STATES_FIELD = new ParseField("max_determined_states");
private static final ParseField LOWERCASE_EXPANDED_TERMS_FIELD = new ParseField("lowercase_expanded_terms");
private static final ParseField LOWERCASE_EXPANDED_TERMS_FIELD = new ParseField("lowercase_expanded_terms")
.withAllDeprecated("Decision is now made by the analyzer");
private static final ParseField ENABLE_POSITION_INCREMENTS_FIELD = new ParseField("enable_position_increment");
private static final ParseField ESCAPE_FIELD = new ParseField("escape");
private static final ParseField USE_DIS_MAX_FIELD = new ParseField("use_dis_max");

@ -96,9 +99,10 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
private static final ParseField MINIMUM_SHOULD_MATCH_FIELD = new ParseField("minimum_should_match");
private static final ParseField QUOTE_FIELD_SUFFIX_FIELD = new ParseField("quote_field_suffix");
private static final ParseField LENIENT_FIELD = new ParseField("lenient");
private static final ParseField LOCALE_FIELD = new ParseField("locale");
private static final ParseField LOCALE_FIELD = new ParseField("locale")
.withAllDeprecated("Decision is now made by the analyzer");
private static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone");

private static final ParseField SPLIT_ON_WHITESPACE = new ParseField("split_on_whitespace");

private final String queryString;

@ -126,12 +130,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue

private Boolean analyzeWildcard;

private boolean lowercaseExpandedTerms = DEFAULT_LOWERCASE_EXPANDED_TERMS;

private boolean enablePositionIncrements = DEFAULT_ENABLE_POSITION_INCREMENTS;

private Locale locale = DEFAULT_LOCALE;

private Fuzziness fuzziness = DEFAULT_FUZZINESS;

private int fuzzyPrefixLength = DEFAULT_FUZZY_PREFIX_LENGTH;

@ -159,6 +159,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
/** To limit effort spent determinizing regexp queries. */
private int maxDeterminizedStates = DEFAULT_MAX_DETERMINED_STATES;

private boolean splitOnWhitespace = DEFAULT_SPLIT_ON_WHITESPACE;

public QueryStringQueryBuilder(String queryString) {
if (queryString == null) {
throw new IllegalArgumentException("query text missing");

@ -184,9 +186,13 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
autoGeneratePhraseQueries = in.readBoolean();
allowLeadingWildcard = in.readOptionalBoolean();
analyzeWildcard = in.readOptionalBoolean();
lowercaseExpandedTerms = in.readBoolean();
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
in.readBoolean(); // lowercase_expanded_terms
}
enablePositionIncrements = in.readBoolean();
locale = Locale.forLanguageTag(in.readString());
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
in.readString(); // locale
|
||||
}
|
||||
fuzziness = new Fuzziness(in);
|
||||
fuzzyPrefixLength = in.readVInt();
|
||||
fuzzyMaxExpansions = in.readVInt();
|
||||
|
@ -200,6 +206,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
timeZone = in.readOptionalTimeZone();
|
||||
escape = in.readBoolean();
|
||||
maxDeterminizedStates = in.readVInt();
|
||||
if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
splitOnWhitespace = in.readBoolean();
|
||||
} else {
|
||||
splitOnWhitespace = DEFAULT_SPLIT_ON_WHITESPACE;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -218,9 +229,13 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
out.writeBoolean(this.autoGeneratePhraseQueries);
|
||||
out.writeOptionalBoolean(this.allowLeadingWildcard);
|
||||
out.writeOptionalBoolean(this.analyzeWildcard);
|
||||
out.writeBoolean(this.lowercaseExpandedTerms);
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(true); // lowercase_expanded_terms
|
||||
}
|
||||
out.writeBoolean(this.enablePositionIncrements);
|
||||
out.writeString(this.locale.toLanguageTag());
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
out.writeString(Locale.ROOT.toLanguageTag()); // locale
|
||||
}
|
||||
this.fuzziness.writeTo(out);
|
||||
out.writeVInt(this.fuzzyPrefixLength);
|
||||
out.writeVInt(this.fuzzyMaxExpansions);
|
||||
|
@ -234,6 +249,9 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
out.writeOptionalTimeZone(timeZone);
|
||||
out.writeBoolean(this.escape);
|
||||
out.writeVInt(this.maxDeterminizedStates);
|
||||
if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(this.splitOnWhitespace);
|
||||
}
|
||||
}
|
||||
|
||||
public String queryString() {
|
||||
|
@ -389,19 +407,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
return this.allowLeadingWildcard;
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
|
||||
* lower-cased or not. Default is <tt>true</tt>.
|
||||
*/
|
||||
public QueryStringQueryBuilder lowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
|
||||
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
|
||||
return this;
|
||||
}
|
||||
|
||||
public boolean lowercaseExpandedTerms() {
|
||||
return this.lowercaseExpandedTerms;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set to <tt>true</tt> to enable position increments in result query. Defaults to
|
||||
* <tt>true</tt>.
|
||||
|
@ -473,6 +478,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
return phraseSlop;
|
||||
}
|
||||
|
||||
public QueryStringQueryBuilder rewrite(String rewrite) {
|
||||
this.rewrite = rewrite;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set to <tt>true</tt> to enable analysis on wildcard and prefix queries.
|
||||
*/
|
||||
|
@ -485,11 +495,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
return this.analyzeWildcard;
|
||||
}
|
||||
|
||||
public QueryStringQueryBuilder rewrite(String rewrite) {
|
||||
this.rewrite = rewrite;
|
||||
return this;
|
||||
}
|
||||
|
||||
public String rewrite() {
|
||||
return this.rewrite;
|
||||
}
|
||||
|
@ -528,15 +533,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
return this.lenient;
|
||||
}
|
||||
|
||||
public QueryStringQueryBuilder locale(Locale locale) {
|
||||
this.locale = locale == null ? DEFAULT_LOCALE : locale;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Locale locale() {
|
||||
return this.locale;
|
||||
}
|
||||
|
||||
/**
|
||||
* In case of date field, we can adjust the from/to fields using a timezone
|
||||
*/
|
||||
|
@ -570,6 +566,19 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
return this.escape;
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether query text should be split on whitespace prior to analysis.
|
||||
* Default is <code>{@value #DEFAULT_SPLIT_ON_WHITESPACE}</code>.
|
||||
*/
|
||||
public QueryStringQueryBuilder splitOnWhitespace(boolean value) {
|
||||
this.splitOnWhitespace = value;
|
||||
return this;
|
||||
}
|
||||
|
||||
public boolean splitOnWhitespace() {
|
||||
return splitOnWhitespace;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(NAME);
|
||||
|
@ -597,7 +606,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
if (this.allowLeadingWildcard != null) {
|
||||
builder.field(ALLOW_LEADING_WILDCARD_FIELD.getPreferredName(), this.allowLeadingWildcard);
|
||||
}
|
||||
builder.field(LOWERCASE_EXPANDED_TERMS_FIELD.getPreferredName(), this.lowercaseExpandedTerms);
|
||||
builder.field(ENABLE_POSITION_INCREMENTS_FIELD.getPreferredName(), this.enablePositionIncrements);
|
||||
this.fuzziness.toXContent(builder, params);
|
||||
builder.field(FUZZY_PREFIX_LENGTH_FIELD.getPreferredName(), this.fuzzyPrefixLength);
|
||||
|
@ -621,11 +629,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
if (this.lenient != null) {
|
||||
builder.field(LENIENT_FIELD.getPreferredName(), this.lenient);
|
||||
}
|
||||
builder.field(LOCALE_FIELD.getPreferredName(), this.locale.toLanguageTag());
|
||||
if (this.timeZone != null) {
|
||||
builder.field(TIME_ZONE_FIELD.getPreferredName(), this.timeZone.getID());
|
||||
}
|
||||
builder.field(ESCAPE_FIELD.getPreferredName(), this.escape);
|
||||
builder.field(SPLIT_ON_WHITESPACE.getPreferredName(), this.splitOnWhitespace);
|
||||
printBoostAndQueryName(builder);
|
||||
builder.endObject();
|
||||
}
|
||||
|
@ -642,7 +650,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
|
||||
boolean autoGeneratePhraseQueries = QueryStringQueryBuilder.DEFAULT_AUTO_GENERATE_PHRASE_QUERIES;
|
||||
int maxDeterminizedStates = QueryStringQueryBuilder.DEFAULT_MAX_DETERMINED_STATES;
|
||||
boolean lowercaseExpandedTerms = QueryStringQueryBuilder.DEFAULT_LOWERCASE_EXPANDED_TERMS;
|
||||
boolean enablePositionIncrements = QueryStringQueryBuilder.DEFAULT_ENABLE_POSITION_INCREMENTS;
|
||||
boolean escape = QueryStringQueryBuilder.DEFAULT_ESCAPE;
|
||||
boolean useDisMax = QueryStringQueryBuilder.DEFAULT_USE_DIS_MAX;
|
||||
|
@ -657,10 +664,10 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
Boolean lenient = null;
|
||||
Operator defaultOperator = QueryStringQueryBuilder.DEFAULT_OPERATOR;
|
||||
String timeZone = null;
|
||||
Locale locale = QueryStringQueryBuilder.DEFAULT_LOCALE;
|
||||
Fuzziness fuzziness = QueryStringQueryBuilder.DEFAULT_FUZZINESS;
|
||||
String fuzzyRewrite = null;
|
||||
String rewrite = null;
|
||||
boolean splitOnWhitespace = DEFAULT_SPLIT_ON_WHITESPACE;
|
||||
Map<String, Float> fieldsAndWeights = new HashMap<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
|
@ -707,7 +714,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_DETERMINED_STATES_FIELD)) {
|
||||
maxDeterminizedStates = parser.intValue();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOWERCASE_EXPANDED_TERMS_FIELD)) {
|
||||
lowercaseExpandedTerms = parser.booleanValue();
|
||||
// ignore, deprecated setting
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ENABLE_POSITION_INCREMENTS_FIELD)) {
|
||||
enablePositionIncrements = parser.booleanValue();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ESCAPE_FIELD)) {
|
||||
|
@ -739,8 +746,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) {
|
||||
lenient = parser.booleanValue();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOCALE_FIELD)) {
|
||||
String localeStr = parser.text();
|
||||
locale = Locale.forLanguageTag(localeStr);
|
||||
// ignore, deprecated setting
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, TIME_ZONE_FIELD)) {
|
||||
try {
|
||||
timeZone = parser.text();
|
||||
|
@ -750,6 +756,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
}
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
|
||||
queryName = parser.text();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, SPLIT_ON_WHITESPACE)) {
|
||||
splitOnWhitespace = parser.booleanValue();
|
||||
} else {
|
||||
throw new ParsingException(parser.getTokenLocation(), "[" + QueryStringQueryBuilder.NAME +
|
||||
"] query does not support [" + currentFieldName + "]");
|
||||
|
@ -772,7 +780,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
queryStringQuery.allowLeadingWildcard(allowLeadingWildcard);
|
||||
queryStringQuery.autoGeneratePhraseQueries(autoGeneratePhraseQueries);
|
||||
queryStringQuery.maxDeterminizedStates(maxDeterminizedStates);
|
||||
queryStringQuery.lowercaseExpandedTerms(lowercaseExpandedTerms);
|
||||
queryStringQuery.enablePositionIncrements(enablePositionIncrements);
|
||||
queryStringQuery.escape(escape);
|
||||
queryStringQuery.useDisMax(useDisMax);
|
||||
|
@ -788,9 +795,9 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
queryStringQuery.quoteFieldSuffix(quoteFieldSuffix);
|
||||
queryStringQuery.lenient(lenient);
|
||||
queryStringQuery.timeZone(timeZone);
|
||||
queryStringQuery.locale(locale);
|
||||
queryStringQuery.boost(boost);
|
||||
queryStringQuery.queryName(queryName);
|
||||
queryStringQuery.splitOnWhitespace(splitOnWhitespace);
|
||||
return Optional.of(queryStringQuery);
|
||||
}
|
||||
|
||||
|
@ -810,10 +817,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
Objects.equals(quoteFieldSuffix, other.quoteFieldSuffix) &&
|
||||
Objects.equals(autoGeneratePhraseQueries, other.autoGeneratePhraseQueries) &&
|
||||
Objects.equals(allowLeadingWildcard, other.allowLeadingWildcard) &&
|
||||
Objects.equals(lowercaseExpandedTerms, other.lowercaseExpandedTerms) &&
|
||||
Objects.equals(enablePositionIncrements, other.enablePositionIncrements) &&
|
||||
Objects.equals(analyzeWildcard, other.analyzeWildcard) &&
|
||||
Objects.equals(locale.toLanguageTag(), other.locale.toLanguageTag()) &&
|
||||
Objects.equals(fuzziness, other.fuzziness) &&
|
||||
Objects.equals(fuzzyPrefixLength, other.fuzzyPrefixLength) &&
|
||||
Objects.equals(fuzzyMaxExpansions, other.fuzzyMaxExpansions) &&
|
||||
|
@ -827,16 +832,17 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
timeZone == null ? other.timeZone == null : other.timeZone != null &&
|
||||
Objects.equals(timeZone.getID(), other.timeZone.getID()) &&
|
||||
Objects.equals(escape, other.escape) &&
|
||||
Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates);
|
||||
Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) &&
|
||||
Objects.equals(splitOnWhitespace, other.splitOnWhitespace);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int doHashCode() {
|
||||
return Objects.hash(queryString, defaultField, fieldsAndWeights, defaultOperator, analyzer, quoteAnalyzer,
|
||||
quoteFieldSuffix, autoGeneratePhraseQueries, allowLeadingWildcard, lowercaseExpandedTerms,
|
||||
enablePositionIncrements, analyzeWildcard, locale.toLanguageTag(), fuzziness, fuzzyPrefixLength,
|
||||
quoteFieldSuffix, autoGeneratePhraseQueries, allowLeadingWildcard, analyzeWildcard,
|
||||
enablePositionIncrements, fuzziness, fuzzyPrefixLength,
|
||||
fuzzyMaxExpansions, fuzzyRewrite, phraseSlop, useDisMax, tieBreaker, rewrite, minimumShouldMatch, lenient,
|
||||
timeZone == null ? 0 : timeZone.getID(), escape, maxDeterminizedStates);
|
||||
timeZone == null ? 0 : timeZone.getID(), escape, maxDeterminizedStates, splitOnWhitespace);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -890,9 +896,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
qpSettings.autoGeneratePhraseQueries(autoGeneratePhraseQueries);
|
||||
qpSettings.allowLeadingWildcard(allowLeadingWildcard == null ? context.queryStringAllowLeadingWildcard() : allowLeadingWildcard);
|
||||
qpSettings.analyzeWildcard(analyzeWildcard == null ? context.queryStringAnalyzeWildcard() : analyzeWildcard);
|
||||
qpSettings.lowercaseExpandedTerms(lowercaseExpandedTerms);
|
||||
qpSettings.enablePositionIncrements(enablePositionIncrements);
|
||||
qpSettings.locale(locale);
|
||||
qpSettings.fuzziness(fuzziness);
|
||||
qpSettings.fuzzyPrefixLength(fuzzyPrefixLength);
|
||||
qpSettings.fuzzyMaxExpansions(fuzzyMaxExpansions);
|
||||
|
@ -904,6 +908,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
|||
qpSettings.lenient(lenient == null ? context.queryStringLenient() : lenient);
|
||||
qpSettings.timeZone(timeZone);
|
||||
qpSettings.maxDeterminizedStates(maxDeterminizedStates);
|
||||
qpSettings.splitOnWhitespace(splitOnWhitespace);
|
||||
|
||||
MapperQueryParser queryParser = context.queryParser(qpSettings);
|
||||
Query query;
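A hedged usage sketch of the query_string builder after this change (field names and boosts are illustrative): the deprecated lowercase_expanded_terms and locale setters are gone, and the new split_on_whitespace flag controls whether the input is whitespace-split before analysis:

    QueryStringQueryBuilder qsq = QueryBuilders.queryStringQuery("new york city")
            .field("title", 2.0f)
            .field("body")
            .splitOnWhitespace(false);   // let the analyzer see the whole input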
@ -30,10 +30,10 @@ import org.apache.lucene.search.FuzzyQuery;
|
|||
import org.apache.lucene.search.PrefixQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.SynonymQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.List;
|
||||
|
@ -98,14 +98,13 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
*/
|
||||
@Override
|
||||
public Query newFuzzyQuery(String text, int fuzziness) {
|
||||
if (settings.lowercaseExpandedTerms()) {
|
||||
text = text.toLowerCase(settings.locale());
|
||||
}
|
||||
BooleanQuery.Builder bq = new BooleanQuery.Builder();
|
||||
bq.setDisableCoord(true);
|
||||
for (Map.Entry<String,Float> entry : weights.entrySet()) {
|
||||
final String fieldName = entry.getKey();
|
||||
try {
|
||||
Query query = new FuzzyQuery(new Term(entry.getKey(), text), fuzziness);
|
||||
final BytesRef term = getAnalyzer().normalize(fieldName, text);
|
||||
Query query = new FuzzyQuery(new Term(fieldName, term), fuzziness);
|
||||
bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD);
|
||||
} catch (RuntimeException e) {
|
||||
rethrowUnlessLenient(e);
|
||||
|
@ -120,9 +119,18 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
bq.setDisableCoord(true);
|
||||
for (Map.Entry<String,Float> entry : weights.entrySet()) {
|
||||
try {
|
||||
Query q = createPhraseQuery(entry.getKey(), text, slop);
|
||||
String field = entry.getKey();
|
||||
if (settings.quoteFieldSuffix() != null) {
|
||||
String quoteField = field + settings.quoteFieldSuffix();
|
||||
MappedFieldType quotedFieldType = context.fieldMapper(quoteField);
|
||||
if (quotedFieldType != null) {
|
||||
field = quoteField;
|
||||
}
|
||||
}
|
||||
Float boost = entry.getValue();
|
||||
Query q = createPhraseQuery(field, text, slop);
|
||||
if (q != null) {
|
||||
bq.add(wrapWithBoost(q, entry.getValue()), BooleanClause.Occur.SHOULD);
|
||||
bq.add(wrapWithBoost(q, boost), BooleanClause.Occur.SHOULD);
|
||||
}
|
||||
} catch (RuntimeException e) {
|
||||
rethrowUnlessLenient(e);
|
||||
|
@ -137,20 +145,19 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
*/
|
||||
@Override
|
||||
public Query newPrefixQuery(String text) {
|
||||
if (settings.lowercaseExpandedTerms()) {
|
||||
text = text.toLowerCase(settings.locale());
|
||||
}
|
||||
BooleanQuery.Builder bq = new BooleanQuery.Builder();
|
||||
bq.setDisableCoord(true);
|
||||
for (Map.Entry<String,Float> entry : weights.entrySet()) {
|
||||
final String fieldName = entry.getKey();
|
||||
try {
|
||||
if (settings.analyzeWildcard()) {
|
||||
Query analyzedQuery = newPossiblyAnalyzedQuery(entry.getKey(), text);
|
||||
Query analyzedQuery = newPossiblyAnalyzedQuery(fieldName, text);
|
||||
if (analyzedQuery != null) {
|
||||
bq.add(wrapWithBoost(analyzedQuery, entry.getValue()), BooleanClause.Occur.SHOULD);
|
||||
}
|
||||
} else {
|
||||
Query query = new PrefixQuery(new Term(entry.getKey(), text));
|
||||
Term term = new Term(fieldName, getAnalyzer().normalize(fieldName, text));
|
||||
Query query = new PrefixQuery(term);
|
||||
bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD);
|
||||
}
|
||||
} catch (RuntimeException e) {
|
||||
|
@ -173,11 +180,11 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
* of {@code TermQuery}s and {@code PrefixQuery}s
|
||||
*/
|
||||
private Query newPossiblyAnalyzedQuery(String field, String termStr) {
|
||||
List<List<String>> tlist = new ArrayList<> ();
|
||||
List<List<BytesRef>> tlist = new ArrayList<> ();
|
||||
// get Analyzer from superclass and tokenize the term
|
||||
try (TokenStream source = getAnalyzer().tokenStream(field, termStr)) {
|
||||
source.reset();
|
||||
List<String> currentPos = new ArrayList<>();
|
||||
List<BytesRef> currentPos = new ArrayList<>();
|
||||
CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
|
||||
PositionIncrementAttribute posAtt = source.addAttribute(PositionIncrementAttribute.class);
|
||||
|
||||
|
@ -188,7 +195,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
tlist.add(currentPos);
|
||||
currentPos = new ArrayList<>();
|
||||
}
|
||||
currentPos.add(termAtt.toString());
|
||||
final BytesRef term = getAnalyzer().normalize(field, termAtt.toString());
|
||||
currentPos.add(term);
|
||||
hasMoreTokens = source.incrementToken();
|
||||
}
|
||||
if (currentPos.isEmpty() == false) {
|
||||
|
@ -214,7 +222,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
// build a boolean query with prefix on the last position only.
|
||||
BooleanQuery.Builder builder = new BooleanQuery.Builder();
|
||||
for (int pos = 0; pos < tlist.size(); pos++) {
|
||||
List<String> plist = tlist.get(pos);
|
||||
List<BytesRef> plist = tlist.get(pos);
|
||||
boolean isLastPos = (pos == tlist.size()-1);
|
||||
Query posQuery;
|
||||
if (plist.size() == 1) {
|
||||
|
@ -232,7 +240,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
posQuery = new SynonymQuery(terms);
|
||||
} else {
|
||||
BooleanQuery.Builder innerBuilder = new BooleanQuery.Builder();
|
||||
for (String token : plist) {
|
||||
for (BytesRef token : plist) {
|
||||
innerBuilder.add(new BooleanClause(new PrefixQuery(new Term(field, token)),
|
||||
BooleanClause.Occur.SHOULD));
|
||||
}
|
||||
|
@ -248,14 +256,12 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
* their default values
|
||||
*/
|
||||
static class Settings {
|
||||
/** Locale to use for parsing. */
|
||||
private Locale locale = SimpleQueryStringBuilder.DEFAULT_LOCALE;
|
||||
/** Specifies whether parsed terms should be lowercased. */
|
||||
private boolean lowercaseExpandedTerms = SimpleQueryStringBuilder.DEFAULT_LOWERCASE_EXPANDED_TERMS;
|
||||
/** Specifies whether lenient query parsing should be used. */
|
||||
private boolean lenient = SimpleQueryStringBuilder.DEFAULT_LENIENT;
|
||||
/** Specifies whether wildcards should be analyzed. */
|
||||
private boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD;
|
||||
/** Specifies a suffix, if any, to apply to field names for phrase matching. */
|
||||
private String quoteFieldSuffix = null;
|
||||
|
||||
/**
|
||||
* Generates default {@link Settings} object (uses ROOT locale, does
|
||||
|
@ -264,36 +270,6 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
public Settings() {
|
||||
}
|
||||
|
||||
public Settings(Locale locale, Boolean lowercaseExpandedTerms, Boolean lenient, Boolean analyzeWildcard) {
|
||||
this.locale = locale;
|
||||
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
|
||||
this.lenient = lenient;
|
||||
this.analyzeWildcard = analyzeWildcard;
|
||||
}
|
||||
|
||||
/** Specifies the locale to use for parsing, Locale.ROOT by default. */
|
||||
public void locale(Locale locale) {
|
||||
this.locale = (locale != null) ? locale : SimpleQueryStringBuilder.DEFAULT_LOCALE;
|
||||
}
|
||||
|
||||
/** Returns the locale to use for parsing. */
|
||||
public Locale locale() {
|
||||
return this.locale;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies whether to lowercase parse terms, defaults to true if
|
||||
* unset.
|
||||
*/
|
||||
public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
|
||||
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
|
||||
}
|
||||
|
||||
/** Returns whether to lowercase parse terms. */
|
||||
public boolean lowercaseExpandedTerms() {
|
||||
return this.lowercaseExpandedTerms;
|
||||
}
|
||||
|
||||
/** Specifies whether to use lenient parsing, defaults to false. */
|
||||
public void lenient(boolean lenient) {
|
||||
this.lenient = lenient;
|
||||
|
@ -314,12 +290,24 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
return analyzeWildcard;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the suffix to append to field names for phrase matching.
|
||||
*/
|
||||
public void quoteFieldSuffix(String suffix) {
|
||||
this.quoteFieldSuffix = suffix;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the suffix to append for phrase matching, or {@code null} if
|
||||
* no suffix should be appended.
|
||||
*/
|
||||
public String quoteFieldSuffix() {
|
||||
return quoteFieldSuffix;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// checking the return value of toLanguageTag() for locales only.
|
||||
// For further reasoning see
|
||||
// https://issues.apache.org/jira/browse/LUCENE-4021
|
||||
return Objects.hash(locale.toLanguageTag(), lowercaseExpandedTerms, lenient, analyzeWildcard);
|
||||
return Objects.hash(lenient, analyzeWildcard, quoteFieldSuffix);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -331,14 +319,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
|
|||
return false;
|
||||
}
|
||||
Settings other = (Settings) obj;
|
||||
|
||||
// checking the return value of toLanguageTag() for locales only.
|
||||
// For further reasoning see
|
||||
// https://issues.apache.org/jira/browse/LUCENE-4021
|
||||
return (Objects.equals(locale.toLanguageTag(), other.locale.toLanguageTag())
|
||||
&& Objects.equals(lowercaseExpandedTerms, other.lowercaseExpandedTerms)
|
||||
&& Objects.equals(lenient, other.lenient)
|
||||
&& Objects.equals(analyzeWildcard, other.analyzeWildcard));
|
||||
return Objects.equals(lenient, other.lenient) && Objects.equals(analyzeWildcard, other.analyzeWildcard)
|
||||
&& Objects.equals(quoteFieldSuffix, other.quoteFieldSuffix);
|
||||
}
|
||||
}
|
||||
}
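A hedged sketch of the normalization path the parser now relies on (the StandardAnalyzer instance and the field name are assumptions, not part of the patch): instead of lower-casing expanded terms with an explicit locale, the analyzer's normalize method produces the term bytes used for fuzzy and prefix expansion:

    Analyzer analyzer = new StandardAnalyzer();
    BytesRef normalized = analyzer.normalize("body", "QUIck");       // runs the field's normalization chain (e.g. lowercasing)
    Query fuzzy = new FuzzyQuery(new Term("body", normalized), 2);   // expansion built from the normalized bytes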
@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
|
|||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -78,10 +79,6 @@ import java.util.TreeMap;
|
|||
* > online documentation</a>.
|
||||
*/
|
||||
public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQueryStringBuilder> {
|
||||
/** Default locale used for parsing.*/
|
||||
public static final Locale DEFAULT_LOCALE = Locale.ROOT;
|
||||
/** Default for lowercasing parsed terms.*/
|
||||
public static final boolean DEFAULT_LOWERCASE_EXPANDED_TERMS = true;
|
||||
/** Default for using lenient query parsing.*/
|
||||
public static final boolean DEFAULT_LENIENT = false;
|
||||
/** Default for wildcard analysis.*/
|
||||
|
@ -94,16 +91,21 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
/** Name for (de-)serialization. */
|
||||
public static final String NAME = "simple_query_string";
|
||||
|
||||
public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
|
||||
|
||||
private static final ParseField MINIMUM_SHOULD_MATCH_FIELD = new ParseField("minimum_should_match");
|
||||
private static final ParseField ANALYZE_WILDCARD_FIELD = new ParseField("analyze_wildcard");
|
||||
private static final ParseField LENIENT_FIELD = new ParseField("lenient");
|
||||
private static final ParseField LOWERCASE_EXPANDED_TERMS_FIELD = new ParseField("lowercase_expanded_terms");
|
||||
private static final ParseField LOCALE_FIELD = new ParseField("locale");
|
||||
private static final ParseField LOWERCASE_EXPANDED_TERMS_FIELD = new ParseField("lowercase_expanded_terms")
|
||||
.withAllDeprecated("Decision is now made by the analyzer");
|
||||
private static final ParseField LOCALE_FIELD = new ParseField("locale")
|
||||
.withAllDeprecated("Decision is now made by the analyzer");
|
||||
private static final ParseField FLAGS_FIELD = new ParseField("flags");
|
||||
private static final ParseField DEFAULT_OPERATOR_FIELD = new ParseField("default_operator");
|
||||
private static final ParseField ANALYZER_FIELD = new ParseField("analyzer");
|
||||
private static final ParseField QUERY_FIELD = new ParseField("query");
|
||||
private static final ParseField FIELDS_FIELD = new ParseField("fields");
|
||||
private static final ParseField QUOTE_FIELD_SUFFIX_FIELD = new ParseField("quote_field_suffix");
|
||||
|
||||
/** Query text to parse. */
|
||||
private final String queryText;
|
||||
|
@ -153,11 +155,18 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
flags = in.readInt();
|
||||
analyzer = in.readOptionalString();
|
||||
defaultOperator = Operator.readFromStream(in);
|
||||
settings.lowercaseExpandedTerms(in.readBoolean());
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
in.readBoolean(); // lowercase_expanded_terms
|
||||
}
|
||||
settings.lenient(in.readBoolean());
|
||||
settings.analyzeWildcard(in.readBoolean());
|
||||
settings.locale(Locale.forLanguageTag(in.readString()));
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
in.readString(); // locale
|
||||
}
|
||||
minimumShouldMatch = in.readOptionalString();
|
||||
if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
settings.quoteFieldSuffix(in.readOptionalString());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -171,11 +180,18 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
out.writeInt(flags);
|
||||
out.writeOptionalString(analyzer);
|
||||
defaultOperator.writeTo(out);
|
||||
out.writeBoolean(settings.lowercaseExpandedTerms());
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(true); // lowercase_expanded_terms
|
||||
}
|
||||
out.writeBoolean(settings.lenient());
|
||||
out.writeBoolean(settings.analyzeWildcard());
|
||||
out.writeString(settings.locale().toLanguageTag());
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
out.writeString(Locale.ROOT.toLanguageTag()); // locale
|
||||
}
|
||||
out.writeOptionalString(minimumShouldMatch);
|
||||
if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
out.writeOptionalString(settings.quoteFieldSuffix());
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns the text to parse the query from. */
|
||||
|
@ -268,28 +284,18 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
}
|
||||
|
||||
/**
|
||||
* Specifies whether parsed terms for this query should be lower-cased.
|
||||
* Defaults to true if not set.
|
||||
* Set the suffix to append to field names for phrase matching.
|
||||
*/
|
||||
public SimpleQueryStringBuilder lowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
|
||||
this.settings.lowercaseExpandedTerms(lowercaseExpandedTerms);
|
||||
public SimpleQueryStringBuilder quoteFieldSuffix(String suffix) {
|
||||
settings.quoteFieldSuffix(suffix);
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Returns whether parsed terms should be lower cased for this query. */
|
||||
public boolean lowercaseExpandedTerms() {
|
||||
return this.settings.lowercaseExpandedTerms();
|
||||
}
|
||||
|
||||
/** Specifies the locale for parsing terms. Defaults to ROOT if none is set. */
|
||||
public SimpleQueryStringBuilder locale(Locale locale) {
|
||||
this.settings.locale(locale);
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Returns the locale for parsing terms for this query. */
|
||||
public Locale locale() {
|
||||
return this.settings.locale();
|
||||
/**
|
||||
* Return the suffix to append to field names for phrase matching.
|
||||
*/
|
||||
public String quoteFieldSuffix() {
|
||||
return settings.quoteFieldSuffix();
|
||||
}
|
||||
|
||||
/** Specifies whether query parsing should be lenient. Defaults to false. */
|
||||
|
@ -404,10 +410,11 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
|
||||
builder.field(FLAGS_FIELD.getPreferredName(), flags);
|
||||
builder.field(DEFAULT_OPERATOR_FIELD.getPreferredName(), defaultOperator.name().toLowerCase(Locale.ROOT));
|
||||
builder.field(LOWERCASE_EXPANDED_TERMS_FIELD.getPreferredName(), settings.lowercaseExpandedTerms());
|
||||
builder.field(LENIENT_FIELD.getPreferredName(), settings.lenient());
|
||||
builder.field(ANALYZE_WILDCARD_FIELD.getPreferredName(), settings.analyzeWildcard());
|
||||
builder.field(LOCALE_FIELD.getPreferredName(), (settings.locale().toLanguageTag()));
|
||||
if (settings.quoteFieldSuffix() != null) {
|
||||
builder.field(QUOTE_FIELD_SUFFIX_FIELD.getPreferredName(), settings.quoteFieldSuffix());
|
||||
}
|
||||
|
||||
if (minimumShouldMatch != null) {
|
||||
builder.field(MINIMUM_SHOULD_MATCH_FIELD.getPreferredName(), minimumShouldMatch);
|
||||
|
@ -430,9 +437,8 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
String analyzerName = null;
|
||||
int flags = SimpleQueryStringFlag.ALL.value();
|
||||
boolean lenient = SimpleQueryStringBuilder.DEFAULT_LENIENT;
|
||||
boolean lowercaseExpandedTerms = SimpleQueryStringBuilder.DEFAULT_LOWERCASE_EXPANDED_TERMS;
|
||||
boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD;
|
||||
Locale locale = null;
|
||||
String quoteFieldSuffix = null;
|
||||
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
|
@ -483,10 +489,9 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
}
|
||||
}
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOCALE_FIELD)) {
|
||||
String localeStr = parser.text();
|
||||
locale = Locale.forLanguageTag(localeStr);
|
||||
// ignore, deprecated setting
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOWERCASE_EXPANDED_TERMS_FIELD)) {
|
||||
lowercaseExpandedTerms = parser.booleanValue();
|
||||
// ignore, deprecated setting
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) {
|
||||
lenient = parser.booleanValue();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZE_WILDCARD_FIELD)) {
|
||||
|
@ -495,6 +500,8 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
queryName = parser.text();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
|
||||
minimumShouldMatch = parser.textOrNull();
|
||||
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, QUOTE_FIELD_SUFFIX_FIELD)) {
|
||||
quoteFieldSuffix = parser.textOrNull();
|
||||
} else {
|
||||
throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME +
|
||||
"] unsupported field [" + parser.currentName() + "]");
|
||||
|
@ -512,8 +519,8 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
|||
|
||||
SimpleQueryStringBuilder qb = new SimpleQueryStringBuilder(queryBody);
|
||||
qb.boost(boost).fields(fieldsAndWeights).analyzer(analyzerName).queryName(queryName).minimumShouldMatch(minimumShouldMatch);
|
||||
qb.flags(flags).defaultOperator(defaultOperator).locale(locale).lowercaseExpandedTerms(lowercaseExpandedTerms);
|
||||
qb.lenient(lenient).analyzeWildcard(analyzeWildcard).boost(boost);
|
||||
qb.flags(flags).defaultOperator(defaultOperator);
|
||||
qb.lenient(lenient).analyzeWildcard(analyzeWildcard).boost(boost).quoteFieldSuffix(quoteFieldSuffix);
|
||||
return Optional.of(qb);
|
||||
}
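A hedged usage sketch of the simple_query_string builder after this change (field names are illustrative): quoted phrases can now be routed to a companion field through the new quote_field_suffix, while the removed lowercase/locale setters are no longer available:

    SimpleQueryStringBuilder sqs = QueryBuilders.simpleQueryStringQuery("\"lazy dog\" +quick")
            .field("body")
            .quoteFieldSuffix(".exact");   // phrase parts match body.exact when that field is mapped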
@ -536,26 +536,27 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        return new Engine.Index(uid, doc, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry);
    }

    public void index(Engine.Index index) {
    public Engine.IndexResult index(Engine.Index index) {
        ensureWriteAllowed(index);
        Engine engine = getEngine();
        index(engine, index);
        return index(engine, index);
    }

    private void index(Engine engine, Engine.Index index) {
    private Engine.IndexResult index(Engine engine, Engine.Index index) {
        active.set(true);
        final Engine.IndexResult result;
        index = indexingOperationListeners.preIndex(index);
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs());
            }
            engine.index(index);
            index.endTime(System.nanoTime());
            result = engine.index(index);
        } catch (Exception e) {
            indexingOperationListeners.postIndex(index, e);
            throw e;
        }
        indexingOperationListeners.postIndex(index, index.isCreated());
        indexingOperationListeners.postIndex(index, result);
        return result;
    }

    public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) {
@ -577,30 +578,30 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

    static Engine.Delete prepareDelete(String type, String id, Term uid, long version, VersionType versionType, Engine.Operation.Origin origin) {
        long startTime = System.nanoTime();
        return new Engine.Delete(type, id, uid, version, versionType, origin, startTime, false);
        return new Engine.Delete(type, id, uid, version, versionType, origin, startTime);
    }

    public void delete(Engine.Delete delete) {
    public Engine.DeleteResult delete(Engine.Delete delete) {
        ensureWriteAllowed(delete);
        Engine engine = getEngine();
        delete(engine, delete);
        return delete(engine, delete);
    }

    private void delete(Engine engine, Engine.Delete delete) {
    private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) {
        active.set(true);
        final Engine.DeleteResult result;
        delete = indexingOperationListeners.preDelete(delete);
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("delete [{}]", delete.uid().text());
            }
            engine.delete(delete);
            delete.endTime(System.nanoTime());
            result = engine.delete(delete);
        } catch (Exception e) {
            indexingOperationListeners.postDelete(delete, e);
            throw e;
        }

        indexingOperationListeners.postDelete(delete);
        indexingOperationListeners.postDelete(delete, result);
        return result;
    }

    public Engine.GetResult get(Engine.Get get) {
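A hedged sketch of how a caller can consume the new return value (the shard and operation variables and the failure handler are illustrative, not from the patch): document-level problems are reported on the result, while engine-level problems still surface as exceptions:

    Engine.IndexResult result = indexShard.index(operation);
    if (result.hasFailure()) {
        handleDocumentFailure(result.getFailure());   // e.g. a version conflict
    } else {
        long assignedVersion = result.getVersion();   // version the engine actually assigned
    }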
@ -38,12 +38,17 @@ public interface IndexingOperationListener {
|
|||
}
|
||||
|
||||
/**
|
||||
* Called after the indexing operation occurred.
|
||||
* Called after the indexing operation occurred. Note that this is
|
||||
* also called when indexing a document did not succeed due to document
|
||||
* related failures. See {@link #postIndex(Engine.Index, Exception)}
|
||||
* for engine level failures
|
||||
*/
|
||||
default void postIndex(Engine.Index index, boolean created) {}
|
||||
default void postIndex(Engine.Index index, Engine.IndexResult result) {}
|
||||
|
||||
/**
|
||||
* Called after the indexing operation occurred with exception.
|
||||
* Called after the indexing operation occurred with engine level exception.
|
||||
* See {@link #postIndex(Engine.Index, Engine.IndexResult)} for document
|
||||
* related failures
|
||||
*/
|
||||
default void postIndex(Engine.Index index, Exception ex) {}
|
||||
|
||||
|
@ -56,12 +61,17 @@ public interface IndexingOperationListener {
|
|||
|
||||
|
||||
/**
|
||||
* Called after the delete operation occurred.
|
||||
* Called after the delete operation occurred. Note that this is
|
||||
* also called when deleting a document did not succeed due to document
|
||||
* related failures. See {@link #postDelete(Engine.Delete, Exception)}
|
||||
* for engine level failures
|
||||
*/
|
||||
default void postDelete(Engine.Delete delete) {}
|
||||
default void postDelete(Engine.Delete delete, Engine.DeleteResult result) {}
|
||||
|
||||
/**
|
||||
* Called after the delete operation occurred with exception.
|
||||
* Called after the delete operation occurred with engine level exception.
|
||||
* See {@link #postDelete(Engine.Delete, Engine.DeleteResult)} for document
|
||||
* related failures
|
||||
*/
|
||||
default void postDelete(Engine.Delete delete, Exception ex) {}
|
||||
|
||||
|
@ -91,11 +101,11 @@ public interface IndexingOperationListener {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void postIndex(Engine.Index index, boolean created) {
|
||||
public void postIndex(Engine.Index index, Engine.IndexResult result) {
|
||||
assert index != null;
|
||||
for (IndexingOperationListener listener : listeners) {
|
||||
try {
|
||||
listener.postIndex(index, created);
|
||||
listener.postIndex(index, result);
|
||||
} catch (Exception e) {
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e);
|
||||
}
|
||||
|
@ -129,11 +139,11 @@ public interface IndexingOperationListener {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void postDelete(Engine.Delete delete) {
|
||||
public void postDelete(Engine.Delete delete, Engine.DeleteResult result) {
|
||||
assert delete != null;
|
||||
for (IndexingOperationListener listener : listeners) {
|
||||
try {
|
||||
listener.postDelete(delete);
|
||||
listener.postDelete(delete, result);
|
||||
} catch (Exception e) {
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e);
|
||||
}
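A minimal listener sketch written against the new callback signatures (the counter is illustrative only); since document-level failures now reach the result overloads, implementations should check the result before recording success:

    class CountingListener implements IndexingOperationListener {
        private final java.util.concurrent.atomic.AtomicLong indexed = new java.util.concurrent.atomic.AtomicLong();

        @Override
        public void postIndex(Engine.Index index, Engine.IndexResult result) {
            if (result.hasFailure() == false) {   // document-level failures also arrive here now
                indexed.incrementAndGet();
            }
        }

        @Override
        public void postIndex(Engine.Index index, Exception ex) {
            // engine-level failures still use the exception overload
        }
    }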
@ -74,14 +74,18 @@ final class InternalIndexingStats implements IndexingOperationListener {
    }

    @Override
    public void postIndex(Engine.Index index, boolean created) {
        if (!index.origin().isRecovery()) {
            long took = index.endTime() - index.startTime();
            totalStats.indexMetric.inc(took);
            totalStats.indexCurrent.dec();
            StatsHolder typeStats = typeStats(index.type());
            typeStats.indexMetric.inc(took);
            typeStats.indexCurrent.dec();
    public void postIndex(Engine.Index index, Engine.IndexResult result) {
        if (result.hasFailure() == false) {
            if (!index.origin().isRecovery()) {
                long took = result.getTook();
                totalStats.indexMetric.inc(took);
                totalStats.indexCurrent.dec();
                StatsHolder typeStats = typeStats(index.type());
                typeStats.indexMetric.inc(took);
                typeStats.indexCurrent.dec();
            }
        } else {
            postIndex(index, result.getFailure());
        }
    }

@ -106,14 +110,18 @@ final class InternalIndexingStats implements IndexingOperationListener {
    }

    @Override
    public void postDelete(Engine.Delete delete) {
        if (!delete.origin().isRecovery()) {
            long took = delete.endTime() - delete.startTime();
            totalStats.deleteMetric.inc(took);
            totalStats.deleteCurrent.dec();
            StatsHolder typeStats = typeStats(delete.type());
            typeStats.deleteMetric.inc(took);
            typeStats.deleteCurrent.dec();
    public void postDelete(Engine.Delete delete, Engine.DeleteResult result) {
        if (result.hasFailure() == false) {
            if (!delete.origin().isRecovery()) {
                long took = result.getTook();
                totalStats.deleteMetric.inc(took);
                totalStats.deleteCurrent.dec();
                StatsHolder typeStats = typeStats(delete.type());
                typeStats.deleteMetric.inc(took);
                typeStats.deleteCurrent.dec();
            }
        } else {
            postDelete(delete, result.getFailure());
        }
    }
@ -170,7 +170,7 @@ public class TranslogRecoveryPerformer {
                    logger.trace("[translog] recover [delete] op of [{}][{}]", uid.type(), uid.id());
                }
                final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(),
                        delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime(), false);
                        delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime());
                delete(engine, engineDelete);
                break;
            default:
@ -830,13 +830,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
            }
        }

        public Index(Engine.Index index) {
        public Index(Engine.Index index, Engine.IndexResult indexResult) {
            this.id = index.id();
            this.type = index.type();
            this.source = index.source();
            this.routing = index.routing();
            this.parent = index.parent();
            this.version = index.version();
            this.version = indexResult.getVersion();
            this.timestamp = index.timestamp();
            this.ttl = index.ttl();
            this.versionType = index.versionType();
@ -994,9 +994,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
            assert versionType.validateVersionForWrites(this.version);
        }

        public Delete(Engine.Delete delete) {
        public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) {
            this.uid = delete.uid();
            this.version = delete.version();
            this.version = deleteResult.getVersion();
            this.versionType = delete.versionType();
        }
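A hedged sketch of the new construction path (variable names are illustrative): the translog entry now records the version the engine actually assigned, taken from the result rather than from the incoming operation:

    Engine.IndexResult result = engine.index(operation);
    Translog.Index entry = new Translog.Index(operation, result);   // entry version comes from result.getVersion()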
@ -19,40 +19,15 @@
|
|||
|
||||
package org.elasticsearch.indices;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.cache.RemovalNotification;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.index.cache.request.ShardRequestCache;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Abstract base class for the an {@link IndexShard} level {@linkplain IndicesRequestCache.CacheEntity}.
|
||||
*/
|
||||
abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.CacheEntity {
|
||||
@FunctionalInterface
|
||||
public interface Loader {
|
||||
void load(StreamOutput out) throws IOException;
|
||||
}
|
||||
|
||||
private final Loader loader;
|
||||
private boolean loadedFromCache = true;
|
||||
|
||||
protected AbstractIndexShardCacheEntity(Loader loader) {
|
||||
this.loader = loader;
|
||||
}
|
||||
|
||||
/**
|
||||
* When called after passing this through
|
||||
* {@link IndicesRequestCache#getOrCompute(IndicesRequestCache.CacheEntity, DirectoryReader, BytesReference)} this will return whether
|
||||
* or not the result was loaded from the cache.
|
||||
*/
|
||||
public final boolean loadedFromCache() {
|
||||
return loadedFromCache;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the {@linkplain ShardRequestCache} used to track cache statistics.
|
||||
|
@ -60,27 +35,7 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach
|
|||
protected abstract ShardRequestCache stats();
|
||||
|
||||
@Override
|
||||
public final IndicesRequestCache.Value loadValue() throws IOException {
|
||||
/* BytesStreamOutput allows to pass the expected size but by default uses
|
||||
* BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie.
|
||||
* a date histogram with 3 buckets is ~100byte so 16k might be very wasteful
|
||||
* since we don't shrink to the actual size once we are done serializing.
|
||||
* By passing 512 as the expected size we will resize the byte array in the stream
|
||||
* slowly until we hit the page size and don't waste too much memory for small query
|
||||
* results.*/
|
||||
final int expectedSizeInBytes = 512;
|
||||
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
|
||||
loader.load(out);
|
||||
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
|
||||
// the memory properly paged instead of having varied sized bytes
|
||||
final BytesReference reference = out.bytes();
|
||||
loadedFromCache = false;
|
||||
return new IndicesRequestCache.Value(reference, out.ramBytesUsed());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) {
|
||||
public final void onCached(IndicesRequestCache.Key key, BytesReference value) {
|
||||
stats().onCached(key, value);
|
||||
}
|
||||
|
||||
|
@ -95,7 +50,7 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach
|
|||
}
|
||||
|
||||
@Override
|
||||
public final void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> notification) {
|
||||
public final void onRemoval(RemovalNotification<IndicesRequestCache.Key, BytesReference> notification) {
|
||||
stats().onRemoval(notification.getKey(), notification.getValue(),
|
||||
notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED);
|
||||
}
@ -189,11 +189,6 @@ public class IndexingMemoryController extends AbstractComponent implements Index
        statusChecker.run();
    }

    /** called by IndexShard to record that this many bytes were written to translog */
    public void bytesWritten(int bytes) {
        statusChecker.bytesWritten(bytes);
    }

    /** Asks this shard to throttle indexing to one thread */
    protected void activateThrottling(IndexShard shard) {
        shard.activateThrottling();
@ -205,17 +200,20 @@ public class IndexingMemoryController extends AbstractComponent implements Index
    }

    @Override
    public void postIndex(Engine.Index index, boolean created) {
        recordOperationBytes(index);
    public void postIndex(Engine.Index index, Engine.IndexResult result) {
        recordOperationBytes(index, result);
    }

    @Override
    public void postDelete(Engine.Delete delete) {
        recordOperationBytes(delete);
    public void postDelete(Engine.Delete delete, Engine.DeleteResult result) {
        recordOperationBytes(delete, result);
    }

    private void recordOperationBytes(Engine.Operation op) {
        bytesWritten(op.sizeInBytes());
    /** called by IndexShard to record estimated bytes written to translog for the operation */
    private void recordOperationBytes(Engine.Operation operation, Engine.Result result) {
        if (result.hasFailure() == false) {
            statusChecker.bytesWritten(operation.estimatedSizeInBytes());
        }
    }

    private static final class ShardAndBytesUsed implements Comparable<ShardAndBytesUsed> {
@ -41,12 +41,12 @@ import org.elasticsearch.common.unit.TimeValue;
|
|||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* The indices request cache allows to cache a shard level request stage responses, helping with improving
|
||||
|
@ -62,7 +62,7 @@ import java.util.concurrent.ConcurrentMap;
|
|||
* is functional.
|
||||
*/
|
||||
public final class IndicesRequestCache extends AbstractComponent implements RemovalListener<IndicesRequestCache.Key,
|
||||
IndicesRequestCache.Value>, Closeable {
|
||||
BytesReference>, Closeable {
|
||||
|
||||
/**
|
||||
* A setting to enable or disable request caching on an index level. Its dynamic by default
|
||||
|
@ -79,14 +79,14 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
|
||||
private final ByteSizeValue size;
|
||||
private final TimeValue expire;
|
||||
private final Cache<Key, Value> cache;
|
||||
private final Cache<Key, BytesReference> cache;
|
||||
|
||||
IndicesRequestCache(Settings settings) {
|
||||
super(settings);
|
||||
this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
|
||||
this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
|
||||
long sizeInBytes = size.getBytes();
|
||||
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
|
||||
CacheBuilder<Key, BytesReference> cacheBuilder = CacheBuilder.<Key, BytesReference>builder()
|
||||
.setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);
|
||||
if (expire != null) {
|
||||
cacheBuilder.setExpireAfterAccess(expire);
|
||||
|
@ -105,15 +105,16 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(RemovalNotification<Key, Value> notification) {
|
||||
public void onRemoval(RemovalNotification<Key, BytesReference> notification) {
|
||||
notification.getKey().entity.onRemoval(notification);
|
||||
}
|
||||
|
||||
BytesReference getOrCompute(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) throws Exception {
|
||||
BytesReference getOrCompute(CacheEntity cacheEntity, Supplier<BytesReference> loader,
|
||||
DirectoryReader reader, BytesReference cacheKey) throws Exception {
|
||||
final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey);
|
||||
Loader loader = new Loader(cacheEntity);
|
||||
Value value = cache.computeIfAbsent(key, loader);
|
||||
if (loader.isLoaded()) {
|
||||
Loader cacheLoader = new Loader(cacheEntity, loader);
|
||||
BytesReference value = cache.computeIfAbsent(key, cacheLoader);
|
||||
if (cacheLoader.isLoaded()) {
|
||||
key.entity.onMiss();
|
||||
// see if its the first time we see this reader, and make sure to register a cleanup key
|
||||
CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion());
|
||||
|
@ -126,16 +127,18 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
} else {
|
||||
key.entity.onHit();
|
||||
}
|
||||
return value.reference;
|
||||
return value;
|
||||
}
|
||||
|
||||
private static class Loader implements CacheLoader<Key, Value> {
|
||||
private static class Loader implements CacheLoader<Key, BytesReference> {
|
||||
|
||||
private final CacheEntity entity;
|
||||
private final Supplier<BytesReference> loader;
|
||||
private boolean loaded;
|
||||
|
||||
Loader(CacheEntity entity) {
|
||||
Loader(CacheEntity entity, Supplier<BytesReference> loader) {
|
||||
this.entity = entity;
|
||||
this.loader = loader;
|
||||
}
|
||||
|
||||
public boolean isLoaded() {
|
||||
|
@ -143,8 +146,8 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
}
|
||||
|
||||
@Override
|
||||
public Value load(Key key) throws Exception {
|
||||
Value value = entity.loadValue();
|
||||
public BytesReference load(Key key) throws Exception {
|
||||
BytesReference value = loader.get();
|
||||
entity.onCached(key, value);
|
||||
loaded = true;
|
||||
return value;
|
||||
|
@ -154,16 +157,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
/**
|
||||
* Basic interface to make this cache testable.
|
||||
*/
|
||||
interface CacheEntity {
|
||||
/**
|
||||
* Loads the actual cache value. This is the heavy lifting part.
|
||||
*/
|
||||
Value loadValue() throws IOException;
|
||||
interface CacheEntity extends Accountable {
|
||||
|
||||
/**
|
||||
* Called after the value was loaded via {@link #loadValue()}
|
||||
* Called after the value was loaded.
|
||||
*/
|
||||
void onCached(Key key, Value value);
|
||||
void onCached(Key key, BytesReference value);
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the resource behind this entity is still open ie.
|
||||
|
@@ -190,32 +189,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
/**
|
||||
* Called when this entity instance is removed
|
||||
*/
|
||||
void onRemoval(RemovalNotification<Key, Value> notification);
|
||||
}
|
||||
|
||||
|
||||
|
||||
static class Value implements Accountable {
|
||||
final BytesReference reference;
|
||||
final long ramBytesUsed;
|
||||
|
||||
Value(BytesReference reference, long ramBytesUsed) {
|
||||
this.reference = reference;
|
||||
this.ramBytesUsed = ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
void onRemoval(RemovalNotification<Key, BytesReference> notification);
|
||||
}
|
||||
|
||||
static class Key implements Accountable {
|
||||
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class);
|
||||
|
||||
public final CacheEntity entity; // use as identity equality
|
||||
public final long readerVersion; // use the reader version so we don't keep a reference to a "short" lived reader until it is reaped
|
||||
public final BytesReference value;
|
||||
|
@@ -228,7 +207,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo
|
|||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return RamUsageEstimator.NUM_BYTES_OBJECT_REF + Long.BYTES + value.length();
|
||||
return BASE_RAM_BYTES_USED + entity.ramBytesUsed() + value.length();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
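The IndicesRequestCache changes above drop the intermediate Value wrapper, cache raw BytesReference instances, and move the loading of a missing entry into a Supplier passed by the caller. A minimal, self-contained sketch of that getOrCompute shape follows; it uses plain Java collections rather than the Elasticsearch cache classes, and every name in it is illustrative, not part of the diff.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Illustrative stand-in for the Supplier-based getOrCompute pattern shown above:
// the cache stores the serialized bytes directly and only invokes the loader on a miss.
public class SupplierCacheSketch {
    private final Map<String, byte[]> cache = new ConcurrentHashMap<>();

    /** Returns the cached bytes for the key, computing them with the supplier on a miss. */
    byte[] getOrCompute(String key, Supplier<byte[]> loader) {
        boolean[] loaded = new boolean[] { false };
        byte[] value = cache.computeIfAbsent(key, k -> {
            loaded[0] = true;        // mirrors Loader.isLoaded()
            return loader.get();     // the heavy lifting runs only once per key
        });
        if (loaded[0]) {
            onMiss(key);             // mirrors key.entity.onMiss()
        } else {
            onHit(key);              // mirrors key.entity.onHit()
        }
        return value;
    }

    void onMiss(String key) { System.out.println("miss: " + key); }
    void onHit(String key)  { System.out.println("hit: " + key); }

    public static void main(String[] args) {
        SupplierCacheSketch cache = new SupplierCacheSketch();
        cache.getOrCompute("shard0/query1", () -> "serialized result".getBytes()); // miss
        cache.getOrCompute("shard0/query1", () -> "serialized result".getBytes()); // hit
    }
}

The point of the pattern is that the cache only stores bytes; the caller decides how to produce them, which is what lets query results and field stats share one cache below.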
@@ -23,11 +23,11 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
|
|||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStats;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
|
||||
|
@@ -51,9 +51,11 @@ import org.elasticsearch.common.bytes.BytesArray;
|
|||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.IndexScopedSettings;
|
||||
|
@@ -98,7 +100,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener;
|
|||
import org.elasticsearch.index.shard.IndexingStats;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.AbstractIndexShardCacheEntity.Loader;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
|
@@ -132,8 +133,10 @@ import java.util.concurrent.Executors;
|
|||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
|
@@ -1110,7 +1113,7 @@ public class IndicesService extends AbstractLifecycleComponent
|
|||
if (shard == null) {
|
||||
return;
|
||||
}
|
||||
indicesRequestCache.clear(new IndexShardCacheEntity(shard, null));
|
||||
indicesRequestCache.clear(new IndexShardCacheEntity(shard));
|
||||
logger.trace("{} explicit cache clear", shard.shardId());
|
||||
}
|
||||
|
||||
|
@@ -1122,13 +1125,19 @@ public class IndicesService extends AbstractLifecycleComponent
|
|||
*/
|
||||
public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception {
|
||||
assert canCache(request, context);
|
||||
final IndexShardCacheEntity entity = new IndexShardCacheEntity(context.indexShard(), out -> {
|
||||
queryPhase.execute(context);
|
||||
context.queryResult().writeToNoId(out);
|
||||
});
|
||||
final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
|
||||
final BytesReference bytesReference = indicesRequestCache.getOrCompute(entity, directoryReader, request.cacheKey());
|
||||
if (entity.loadedFromCache()) {
|
||||
|
||||
boolean[] loadedFromCache = new boolean[] { true };
|
||||
BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> {
|
||||
queryPhase.execute(context);
|
||||
try {
|
||||
context.queryResult().writeToNoId(out);
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError("Could not serialize response", e);
|
||||
}
|
||||
loadedFromCache[0] = false;
|
||||
});
|
||||
if (loadedFromCache[0]) {
|
||||
// restore the cached query result into the context
|
||||
final QuerySearchResult result = context.queryResult();
|
||||
StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry);
|
||||
|
@@ -1154,7 +1163,11 @@ public class IndicesService extends AbstractLifecycleComponent
|
|||
}
|
||||
BytesReference cacheKey = new BytesArray("fieldstats:" + field);
|
||||
BytesReference statsRef = cacheShardLevelResult(shard, searcher.getDirectoryReader(), cacheKey, out -> {
|
||||
out.writeOptionalWriteable(fieldType.stats(searcher.reader()));
|
||||
try {
|
||||
out.writeOptionalWriteable(fieldType.stats(searcher.reader()));
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException("Failed to write field stats output", e);
|
||||
}
|
||||
});
|
||||
try (StreamInput in = statsRef.streamInput()) {
|
||||
return in.readOptionalWriteable(FieldStats::readFrom);
|
||||
|
@@ -1173,17 +1186,33 @@ public class IndicesService extends AbstractLifecycleComponent
|
|||
* @param loader loads the data into the cache if needed
|
||||
* @return the contents of the cache or the result of calling the loader
|
||||
*/
|
||||
private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Loader loader)
|
||||
private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Consumer<StreamOutput> loader)
|
||||
throws Exception {
|
||||
IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard, loader);
|
||||
return indicesRequestCache.getOrCompute(cacheEntity, reader, cacheKey);
|
||||
IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard);
|
||||
Supplier<BytesReference> supplier = () -> {
|
||||
/* BytesStreamOutput allows passing the expected size, but by default it uses
* BigArrays.PAGE_SIZE_IN_BYTES, which is 16k. A common cached result, e.g.
* a date histogram with 3 buckets, is ~100 bytes, so 16k might be very wasteful
* since we don't shrink to the actual size once we are done serializing.
* By passing 512 as the expected size we will resize the byte array in the stream
* slowly until we hit the page size and don't waste too much memory for small query
* results.*/
|
||||
final int expectedSizeInBytes = 512;
|
||||
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
|
||||
loader.accept(out);
|
||||
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
|
||||
// the memory properly paged instead of having varied sized bytes
|
||||
return out.bytes();
|
||||
}
|
||||
};
|
||||
return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey);
|
||||
}
|
||||
|
||||
static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity {
|
||||
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class);
|
||||
private final IndexShard indexShard;
|
||||
|
||||
protected IndexShardCacheEntity(IndexShard indexShard, Loader loader) {
|
||||
super(loader);
|
||||
protected IndexShardCacheEntity(IndexShard indexShard) {
|
||||
this.indexShard = indexShard;
|
||||
}
|
||||
|
||||
|
@@ -1201,6 +1230,13 @@ public class IndicesService extends AbstractLifecycleComponent
|
|||
public Object getCacheIdentity() {
|
||||
return indexShard;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
// No need to take the IndexShard into account since it is shared
|
||||
// across many entities
|
||||
return BASE_RAM_BYTES_USED;
|
||||
}
|
||||
}
|
||||
|
||||
@FunctionalInterface
|
||||
|
|
|
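The cacheShardLevelResult change above serializes each result into a BytesStreamOutput created with an expected size of 512 bytes instead of a full 16k page. A rough plain-Java analogue of that idea, using ByteArrayOutputStream rather than the Elasticsearch stream classes (the names and payload below are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.function.Consumer;

// Serialize a small result into a buffer that starts small (512 bytes) and grows on demand,
// instead of allocating a full page up front for every cached entry.
public class SmallBufferSketch {
    static byte[] serialize(Consumer<ByteArrayOutputStream> writer) {
        // 512 bytes is plenty for a typical small cached result (e.g. a tiny aggregation),
        // and the stream grows automatically for larger responses.
        ByteArrayOutputStream out = new ByteArrayOutputStream(512);
        writer.accept(out);
        return out.toByteArray();
    }

    public static void main(String[] args) {
        byte[] bytes = serialize(out -> {
            try {
                out.write("three-bucket date histogram".getBytes());
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        });
        System.out.println(bytes.length + " bytes cached");
    }
}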
@@ -116,7 +116,7 @@ public final class IngestMetadata implements MetaData.Custom {
|
|||
|
||||
@Override
|
||||
public EnumSet<MetaData.XContentContext> context() {
|
||||
return MetaData.API_AND_GATEWAY;
|
||||
return MetaData.ALL_CONTEXTS;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.monitor.os;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
|
@@ -31,7 +32,7 @@ import java.util.Arrays;
|
|||
import java.util.Objects;
|
||||
|
||||
public class OsStats implements Writeable, ToXContent {
|
||||
|
||||
public static final Version V_5_1_0 = Version.fromId(5010099);
|
||||
private final long timestamp;
|
||||
private final Cpu cpu;
|
||||
private final Mem mem;
|
||||
|
@@ -51,7 +52,11 @@ public class OsStats implements Writeable, ToXContent {
|
|||
this.cpu = new Cpu(in);
|
||||
this.mem = new Mem(in);
|
||||
this.swap = new Swap(in);
|
||||
this.cgroup = in.readOptionalWriteable(Cgroup::new);
|
||||
if (in.getVersion().onOrAfter(V_5_1_0)) {
|
||||
this.cgroup = in.readOptionalWriteable(Cgroup::new);
|
||||
} else {
|
||||
this.cgroup = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -60,7 +65,9 @@ public class OsStats implements Writeable, ToXContent {
|
|||
cpu.writeTo(out);
|
||||
mem.writeTo(out);
|
||||
swap.writeTo(out);
|
||||
out.writeOptionalWriteable(cgroup);
|
||||
if (out.getVersion().onOrAfter(V_5_1_0)) {
|
||||
out.writeOptionalWriteable(cgroup);
|
||||
}
|
||||
}
|
||||
|
||||
public long getTimestamp() {
|
||||
|
|
|
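The OsStats change above only reads and writes the optional cgroup section when the other side of the connection is on a version that knows about it. A simplified, stand-alone sketch of that version-gated serialization pattern; the version id and field layout below are illustrative, not the real Elasticsearch wire format.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// An optional field is only written when the receiving node is known to understand it,
// so older nodes never see bytes they cannot parse.
public class VersionGatedWriteSketch {
    static final int V_5_1_0 = 5010099; // illustrative version id

    static byte[] write(int targetVersion, Long cgroupValue) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeLong(42L);                      // always-present fields (timestamp, cpu, ...)
            if (targetVersion >= V_5_1_0) {          // only newer nodes know how to read cgroup
                out.writeBoolean(cgroupValue != null);
                if (cgroupValue != null) {
                    out.writeLong(cgroupValue);
                }
            }
        }
        return bytes.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(write(5000099, 7L).length); // older target: cgroup omitted
        System.out.println(write(5010099, 7L).length); // newer target: cgroup included
    }
}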
@@ -76,6 +76,9 @@ import org.elasticsearch.common.util.BigArrays;
|
|||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
|
||||
import org.elasticsearch.discovery.zen.UnicastZenPing;
|
||||
import org.elasticsearch.discovery.zen.ZenPing;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.gateway.GatewayAllocator;
|
||||
|
@@ -319,7 +322,8 @@ public class Node implements Closeable {
|
|||
final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool);
|
||||
clusterService.add(scriptModule.getScriptService());
|
||||
resourcesToClose.add(clusterService);
|
||||
final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(), classpathPlugins);
|
||||
final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(),
|
||||
s -> newTribeClientNode(s, classpathPlugins));
|
||||
resourcesToClose.add(tribeService);
|
||||
final IngestService ingestService = new IngestService(settings, threadPool, this.environment,
|
||||
scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class));
|
||||
|
@@ -393,7 +397,10 @@ public class Node implements Closeable {
|
|||
b.bind(HttpServer.class).toProvider(Providers.of(null));
|
||||
};
|
||||
}
|
||||
modules.add(new DiscoveryModule(this.settings, transportService, networkService, pluginsService.filterPlugins(DiscoveryPlugin.class)));
|
||||
final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, transportService, networkService,
|
||||
pluginsService.filterPlugins(DiscoveryPlugin.class));
|
||||
final ZenPing zenPing = newZenPing(settings, threadPool, transportService, discoveryModule.getHostsProvider());
|
||||
modules.add(discoveryModule);
|
||||
pluginsService.processModules(modules);
|
||||
modules.add(b -> {
|
||||
b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry());
|
||||
|
@@ -425,6 +432,7 @@ public class Node implements Closeable {
|
|||
b.bind(UpdateHelper.class).toInstance(new UpdateHelper(settings, scriptModule.getScriptService()));
|
||||
b.bind(MetaDataIndexUpgradeService.class).toInstance(new MetaDataIndexUpgradeService(settings,
|
||||
indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings()));
|
||||
b.bind(ZenPing.class).toInstance(zenPing);
|
||||
{
|
||||
RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings());
|
||||
processRecoverySettings(settingsModule.getClusterSettings(), recoverySettings);
|
||||
|
@@ -881,4 +889,15 @@ public class Node implements Closeable {
|
|||
}
|
||||
return customNameResolvers;
|
||||
}
|
||||
|
||||
/** Create a new ZenPing instance for use in zen discovery. */
|
||||
protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||
UnicastHostsProvider hostsProvider) {
|
||||
return new UnicastZenPing(settings, threadPool, transportService, hostsProvider);
|
||||
}
|
||||
|
||||
/** Constructs an internal node used as a client into a cluster fronted by this tribe node. */
|
||||
protected Node newTribeClientNode(Settings settings, Collection<Class<? extends Plugin>> classpathPlugins) {
|
||||
return new Node(new Environment(settings), classpathPlugins);
|
||||
}
|
||||
}
|
||||
|
|
|
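The Node changes above route ZenPing and tribe-client construction through protected factory methods (newZenPing, newTribeClientNode) so that tests can subclass Node and substitute their own implementations. A small, generic sketch of that overridable-factory pattern, with purely illustrative names:

// Production code calls a protected factory; tests subclass and swap in a stub.
public class FactoryHookSketch {
    interface Ping { String describe(); }

    static class Server {
        String startPing() {
            return newPing().describe();     // all wiring goes through the hook
        }

        /** Tests override this to plug in a fake implementation. */
        protected Ping newPing() {
            return () -> "real unicast ping";
        }
    }

    public static void main(String[] args) {
        Server production = new Server();
        Server testServer = new Server() {
            @Override protected Ping newPing() {
                return () -> "fake ping used in tests";
            }
        };
        System.out.println(production.startPing());
        System.out.println(testServer.startPing());
    }
}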
@@ -19,6 +19,19 @@
|
|||
|
||||
package org.elasticsearch.plugins;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.SettingCommand;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
|
@@ -26,18 +39,6 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.SettingCommand;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
|
||||
import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE;
|
||||
|
||||
/**
|
||||
|
@@ -67,7 +68,7 @@ final class RemovePluginCommand extends SettingCommand {
|
|||
final Path pluginDir = env.pluginsFile().resolve(pluginName);
|
||||
if (Files.exists(pluginDir) == false) {
|
||||
throw new UserException(
|
||||
ExitCodes.USAGE,
|
||||
ExitCodes.CONFIG,
|
||||
"plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins");
|
||||
}
|
||||
|
||||
|
|
|
@@ -192,7 +192,6 @@ public class RestActions {
|
|||
queryBuilder.defaultField(request.param("df"));
|
||||
queryBuilder.analyzer(request.param("analyzer"));
|
||||
queryBuilder.analyzeWildcard(request.paramAsBoolean("analyze_wildcard", false));
|
||||
queryBuilder.lowercaseExpandedTerms(request.paramAsBoolean("lowercase_expanded_terms", true));
|
||||
queryBuilder.lenient(request.paramAsBoolean("lenient", null));
|
||||
String defaultOperator = request.param("default_operator");
|
||||
if (defaultOperator != null) {
|
||||
|
|
|
@@ -34,11 +34,22 @@ public abstract class RestBuilderListener<Response> extends RestResponseListener
|
|||
|
||||
@Override
|
||||
public final RestResponse buildResponse(Response response) throws Exception {
|
||||
return buildResponse(response, channel.newBuilder());
|
||||
try (XContentBuilder builder = channel.newBuilder()) {
|
||||
final RestResponse restResponse = buildResponse(response, builder);
|
||||
assert assertBuilderClosed(builder);
|
||||
return restResponse;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a response to send back over the channel.
|
||||
* Builds a response to send back over the channel. Implementors should ensure that they close the provided {@link XContentBuilder}
|
||||
* using the {@link XContentBuilder#close()} method.
|
||||
*/
|
||||
public abstract RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception;
|
||||
|
||||
// pkg private method that we can override for testing
|
||||
boolean assertBuilderClosed(XContentBuilder xContentBuilder) {
|
||||
assert xContentBuilder.generator().isClosed() : "callers should ensure the XContentBuilder is closed themselves";
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
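The RestBuilderListener change above wraps the builder in try-with-resources and asserts that the subclass closed it, so a leaked XContentBuilder fails fast when assertions are enabled. A stand-alone sketch of that close-and-assert pattern; the Builder type below is a hypothetical stand-in for XContentBuilder.

// The base class owns the builder's lifecycle and checks that implementors closed it themselves.
public class ClosedBuilderSketch {
    static class Builder implements AutoCloseable {
        boolean closed;
        @Override public void close() { closed = true; }
    }

    interface ResponseBuilder {
        String build(Builder builder) throws Exception;
    }

    static String buildResponse(ResponseBuilder delegate) throws Exception {
        try (Builder builder = new Builder()) {
            String response = delegate.build(builder);
            assert builder.closed : "callers should ensure the builder is closed themselves";
            return response;
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(buildResponse(builder -> {
            builder.close(); // a well-behaved implementation closes the builder it was given
            return "ok";
        }));
    }
}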
@@ -20,9 +20,11 @@
|
|||
package org.elasticsearch.rest.action.admin.indices;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
|
@@ -68,20 +70,22 @@ public class RestUpgradeAction extends BaseRestHandler {
|
|||
}
|
||||
|
||||
private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) {
|
||||
return channel -> client.admin().indices().prepareUpgradeStatus(Strings.splitStringByCommaToArray(request.param("index")))
|
||||
.execute(new RestBuilderListener<UpgradeStatusResponse>(channel) {
|
||||
@Override
|
||||
public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception {
|
||||
builder.startObject();
|
||||
response.toXContent(builder, request);
|
||||
builder.endObject();
|
||||
return new BytesRestResponse(OK, builder);
|
||||
}
|
||||
});
|
||||
UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index")));
|
||||
statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions()));
|
||||
return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener<UpgradeStatusResponse>(channel) {
|
||||
@Override
|
||||
public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception {
|
||||
builder.startObject();
|
||||
response.toXContent(builder, request);
|
||||
builder.endObject();
|
||||
return new BytesRestResponse(OK, builder);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) {
|
||||
UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index")));
|
||||
upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions()));
|
||||
upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false));
|
||||
return channel -> client.admin().indices().upgrade(upgradeReq, new RestBuilderListener<UpgradeResponse>(channel) {
|
||||
@Override
|
||||
|
|
|
@@ -85,7 +85,7 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
clusterStateRequest.clear().nodes(true);
|
||||
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
|
||||
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
|
||||
|
||||
final boolean fullId = request.paramAsBoolean("full_id", false);
|
||||
return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
|
||||
@Override
|
||||
public void processResponse(final ClusterStateResponse clusterStateResponse) {
|
||||
|
@@ -99,7 +99,8 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<NodesStatsResponse>(channel) {
|
||||
@Override
|
||||
public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception {
|
||||
return RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), channel);
|
||||
return RestTable.buildResponse(buildTable(fullId, request, clusterStateResponse, nodesInfoResponse,
|
||||
nodesStatsResponse), channel);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@@ -129,7 +130,8 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio");
|
||||
table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory");
|
||||
table.addCell("file_desc.current", "default:false;alias:fdc,fileDescriptorCurrent;text-align:right;desc:used file descriptors");
|
||||
table.addCell("file_desc.percent", "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio");
|
||||
table.addCell("file_desc.percent",
|
||||
"default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio");
|
||||
table.addCell("file_desc.max", "default:false;alias:fdm,fileDescriptorMax;text-align:right;desc:max file descriptors");
|
||||
|
||||
table.addCell("cpu", "alias:cpu;text-align:right;desc:recent cpu usage");
|
||||
|
@@ -137,7 +139,8 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg");
|
||||
table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg");
|
||||
table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime");
|
||||
table.addCell("node.role", "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only");
|
||||
table.addCell("node.role",
|
||||
"alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only");
|
||||
table.addCell("master", "alias:m;desc:*:current master");
|
||||
table.addCell("name", "alias:n;desc:node name");
|
||||
|
||||
|
@@ -150,9 +153,12 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
|
||||
|
||||
table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache");
|
||||
table.addCell("request_cache.evictions", "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions");
|
||||
table.addCell("request_cache.hit_count", "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts");
|
||||
table.addCell("request_cache.miss_count", "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts");
|
||||
table.addCell("request_cache.evictions",
|
||||
"alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions");
|
||||
table.addCell("request_cache.hit_count",
|
||||
"alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts");
|
||||
table.addCell("request_cache.miss_count",
|
||||
"alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts");
|
||||
|
||||
table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
|
||||
table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
|
||||
|
@@ -165,16 +171,20 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
|
||||
table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
|
||||
|
||||
table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
|
||||
table.addCell("indexing.delete_current",
|
||||
"alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
|
||||
table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
|
||||
table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
|
||||
table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
|
||||
table.addCell("indexing.index_current",
|
||||
"alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
|
||||
table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
|
||||
table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
|
||||
table.addCell("indexing.index_failed", "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops");
|
||||
table.addCell("indexing.index_failed",
|
||||
"alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops");
|
||||
|
||||
table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
|
||||
table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
|
||||
table.addCell("merges.current_docs",
|
||||
"alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
|
||||
table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
|
||||
table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
|
||||
table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
|
||||
|
@@ -185,7 +195,8 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
|
||||
|
||||
table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations");
|
||||
table.addCell("script.cache_evictions", "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions");
|
||||
table.addCell("script.cache_evictions",
|
||||
"alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions");
|
||||
|
||||
table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
|
||||
table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
|
||||
|
@@ -195,14 +206,19 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
|
||||
table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
|
||||
table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts");
|
||||
table.addCell("search.scroll_time", "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open");
|
||||
table.addCell("search.scroll_time",
|
||||
"alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open");
|
||||
table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts");
|
||||
|
||||
table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
|
||||
table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
|
||||
table.addCell("segments.index_writer_memory", "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer");
|
||||
table.addCell("segments.version_map_memory", "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map");
|
||||
table.addCell("segments.fixed_bitset_memory", "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields");
|
||||
table.addCell("segments.index_writer_memory",
|
||||
"alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer");
|
||||
table.addCell("segments.version_map_memory",
|
||||
"alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map");
|
||||
table.addCell("segments.fixed_bitset_memory",
|
||||
"alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types" +
|
||||
" and type filters for types referred in _parent fields");
|
||||
|
||||
table.addCell("suggest.current", "alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops");
|
||||
table.addCell("suggest.time", "alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest");
|
||||
|
@@ -212,8 +228,8 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
return table;
|
||||
}
|
||||
|
||||
private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
|
||||
boolean fullId = req.paramAsBoolean("full_id", false);
|
||||
private Table buildTable(boolean fullId, RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo,
|
||||
NodesStatsResponse nodesStats) {
|
||||
|
||||
DiscoveryNodes nodes = state.getState().nodes();
|
||||
String masterId = nodes.getMasterNodeId();
|
||||
|
@@ -255,14 +271,18 @@ public class RestNodesAction extends AbstractCatAction {
|
|||
table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent());
|
||||
table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal());
|
||||
table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors());
|
||||
table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors()));
|
||||
table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(),
|
||||
processStats.getMaxFileDescriptors()));
|
||||
table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors());
|
||||
|
||||
table.addCell(osStats == null ? null : Short.toString(osStats.getCpu().getPercent()));
|
||||
boolean hasLoadAverage = osStats != null && osStats.getCpu().getLoadAverage() != null;
|
||||
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]));
|
||||
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]));
|
||||
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]));
|
||||
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null :
|
||||
String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]));
|
||||
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null :
|
||||
String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]));
|
||||
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null :
|
||||
String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]));
|
||||
table.addCell(jvmStats == null ? null : jvmStats.getUptime());
|
||||
|
||||
final String roles;
|
||||
|
|
|
@@ -109,7 +109,7 @@ public final class Script implements ToXContent, Writeable {
|
|||
boolean hasType = type != null;
|
||||
out.writeBoolean(hasType);
|
||||
if (hasType) {
|
||||
ScriptType.writeTo(type, out);
|
||||
type.writeTo(out);
|
||||
}
|
||||
out.writeOptionalString(lang);
|
||||
out.writeMap(params);
|
||||
|
|
|
@@ -131,7 +131,7 @@ public final class ScriptMetaData implements MetaData.Custom {
|
|||
|
||||
@Override
|
||||
public EnumSet<MetaData.XContentContext> context() {
|
||||
return MetaData.API_AND_GATEWAY;
|
||||
return MetaData.ALL_CONTEXTS;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -72,7 +72,7 @@ public class ScriptModes {
|
|||
}
|
||||
|
||||
static String sourceKey(ScriptType scriptType) {
|
||||
return SCRIPT_SETTINGS_PREFIX + "." + scriptType.getScriptType();
|
||||
return SCRIPT_SETTINGS_PREFIX + "." + scriptType.getName();
|
||||
}
|
||||
|
||||
static String getGlobalKey(String lang, ScriptType scriptType) {
|
||||
|
|
|
@@ -50,7 +50,7 @@ public class ScriptSettings {
|
|||
for (ScriptType scriptType : ScriptType.values()) {
|
||||
scriptTypeSettingMap.put(scriptType, Setting.boolSetting(
|
||||
ScriptModes.sourceKey(scriptType),
|
||||
scriptType.getDefaultScriptEnabled(),
|
||||
scriptType.isDefaultEnabled(),
|
||||
Property.NodeScope));
|
||||
}
|
||||
SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap);
|
||||
|
@@ -102,7 +102,7 @@ public class ScriptSettings {
|
|||
boolean defaultLangAndType = defaultNonFileScriptMode;
|
||||
// Files are treated differently because they are never default-deny
|
||||
if (ScriptType.FILE == scriptType) {
|
||||
defaultLangAndType = ScriptType.FILE.getDefaultScriptEnabled();
|
||||
defaultLangAndType = ScriptType.FILE.isDefaultEnabled();
|
||||
}
|
||||
final boolean defaultIfNothingSet = defaultLangAndType;
|
||||
|
||||
|
|
|
@@ -22,68 +22,118 @@ package org.elasticsearch.script;
|
|||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* The type of a script, more specifically where it gets loaded from:
|
||||
* - provided dynamically at request time
|
||||
* - loaded from an index
|
||||
* - loaded from file
|
||||
* ScriptType represents the way a script is stored and retrieved from the {@link ScriptService}.
|
||||
* It's also used by {@link ScriptSettings} and {@link ScriptModes} to determine whether or not
* a {@link Script} is allowed to be executed based on both default and user-defined settings.
|
||||
*/
|
||||
public enum ScriptType {
|
||||
public enum ScriptType implements Writeable {
|
||||
|
||||
INLINE(0, "inline", "inline", false),
|
||||
STORED(1, "id", "stored", false),
|
||||
FILE(2, "file", "file", true);
|
||||
/**
|
||||
* INLINE scripts are specified in numerous queries and compiled on-the-fly.
|
||||
* They will be cached based on the lang and code of the script.
|
||||
* They are turned off by default because most languages are insecure
|
||||
* (Groovy and others), but can be overridden by the specific {@link ScriptEngineService}
|
||||
* if the language is naturally secure (Painless, Mustache, and Expressions).
|
||||
*/
|
||||
INLINE ( 0 , new ParseField("inline") , false ),
|
||||
|
||||
private final int val;
|
||||
private final ParseField parseField;
|
||||
private final String scriptType;
|
||||
private final boolean defaultScriptEnabled;
|
||||
/**
|
||||
* STORED scripts are saved as part of the {@link org.elasticsearch.cluster.ClusterState}
|
||||
* based on user requests. They will be cached when they are first used in a query.
|
||||
* They are turned off by default because most languages are insecure
|
||||
* (Groovy and others), but can be overridden by the specific {@link ScriptEngineService}
|
||||
* if the language is naturally secure (Painless, Mustache, and Expressions).
|
||||
*/
|
||||
STORED ( 1 , new ParseField("stored", "id") , false ),
|
||||
|
||||
/**
|
||||
* FILE scripts are loaded from disk either on start-up or on-the-fly depending on
|
||||
* user-defined settings. They will be compiled and cached as soon as they are loaded
|
||||
* from disk. They are turned on by default as they should always be safe to execute.
|
||||
*/
|
||||
FILE ( 2 , new ParseField("file") , true );
|
||||
|
||||
/**
|
||||
* Reads an int from the input stream and converts it to a {@link ScriptType}.
|
||||
* @return The ScriptType read from the stream. Throws an {@link IllegalStateException}
|
||||
* if no ScriptType is found based on the id.
|
||||
*/
|
||||
public static ScriptType readFrom(StreamInput in) throws IOException {
|
||||
int scriptTypeVal = in.readVInt();
|
||||
for (ScriptType type : values()) {
|
||||
if (type.val == scriptTypeVal) {
|
||||
return type;
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + "] expected one of ["
|
||||
+ INLINE.val + "," + FILE.val + "," + STORED.val + "]");
|
||||
}
|
||||
int id = in.readVInt();
|
||||
|
||||
public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{
|
||||
if (scriptType != null) {
|
||||
out.writeVInt(scriptType.val);
|
||||
if (FILE.id == id) {
|
||||
return FILE;
|
||||
} else if (STORED.id == id) {
|
||||
return STORED;
|
||||
} else if (INLINE.id == id) {
|
||||
return INLINE;
|
||||
} else {
|
||||
out.writeVInt(INLINE.val); //Default to inline
|
||||
throw new IllegalStateException("Error reading ScriptType id [" + id + "] from stream, expected one of [" +
|
||||
FILE.id + " [" + FILE.parseField.getPreferredName() + "], " +
|
||||
STORED.id + " [" + STORED.parseField.getPreferredName() + "], " +
|
||||
INLINE.id + " [" + INLINE.parseField.getPreferredName() + "]]");
|
||||
}
|
||||
}
|
||||
|
||||
ScriptType(int val, String name, String scriptType, boolean defaultScriptEnabled) {
|
||||
this.val = val;
|
||||
this.parseField = new ParseField(name);
|
||||
this.scriptType = scriptType;
|
||||
this.defaultScriptEnabled = defaultScriptEnabled;
|
||||
private final int id;
|
||||
private final ParseField parseField;
|
||||
private final boolean defaultEnabled;
|
||||
|
||||
/**
|
||||
* Standard constructor.
|
||||
* @param id A unique identifier for a type that can be read/written to a stream.
|
||||
* @param parseField Specifies the name used to parse input from queries.
|
||||
* @param defaultEnabled Whether or not a {@link ScriptType} can be run by default.
|
||||
*/
|
||||
ScriptType(int id, ParseField parseField, boolean defaultEnabled) {
|
||||
this.id = id;
|
||||
this.parseField = parseField;
|
||||
this.defaultEnabled = defaultEnabled;
|
||||
}
|
||||
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(id);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The unique id for this {@link ScriptType}.
|
||||
*/
|
||||
public int getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The unique name for this {@link ScriptType} based on the {@link ParseField}.
|
||||
*/
|
||||
public String getName() {
|
||||
return parseField.getPreferredName();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Specifies the name used to parse input from queries.
|
||||
*/
|
||||
public ParseField getParseField() {
|
||||
return parseField;
|
||||
}
|
||||
|
||||
public boolean getDefaultScriptEnabled() {
|
||||
return defaultScriptEnabled;
|
||||
}
|
||||
|
||||
public String getScriptType() {
|
||||
return scriptType;
|
||||
/**
|
||||
* @return Whether or not a {@link ScriptType} can be run by default. Note
|
||||
* this can potentially be overridden by any {@link ScriptEngineService}.
|
||||
*/
|
||||
public boolean isDefaultEnabled() {
|
||||
return defaultEnabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The same as calling {@link #getName()}.
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
return getName();
|
||||
}
|
||||
|
||||
}
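The rewritten ScriptType above serializes itself by a fixed numeric id rather than through a static helper, and fails loudly when an unknown id comes off the wire. A self-contained sketch of that stable-id enum serialization pattern, with illustrative ids and names:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

// Each constant owns an explicit id, writes it on the wire, and readFrom() maps it back.
public class EnumWireIdSketch {
    enum Kind {
        INLINE(0), STORED(1), FILE(2);

        private final int id;
        Kind(int id) { this.id = id; }

        void writeTo(DataOutput out) throws IOException {
            out.writeInt(id); // the explicit id, not the ordinal, so constants can be reordered safely
        }

        static Kind readFrom(DataInput in) throws IOException {
            int id = in.readInt();
            for (Kind kind : values()) {
                if (kind.id == id) {
                    return kind;
                }
            }
            throw new IllegalStateException("unknown Kind id [" + id + "]");
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        Kind.STORED.writeTo(new DataOutputStream(bytes));
        Kind roundTripped = Kind.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(roundTripped); // STORED
    }
}

Writing an explicit id instead of Enum.ordinal() keeps the wire format stable even if constants are later reordered or new ones are inserted.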