Merge branch 'master' into keystore
commit cd6e3f4cea

@@ -55,7 +55,7 @@ dependencies {
runtime 'org.apache.commons:commons-math3:3.2'
}

compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked,-processing"
// enable the JMH's BenchmarkProcessor to generate the final benchmark classes
// needs to be added separately otherwise Gradle will quote it and javac will fail
compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
@@ -21,6 +21,7 @@ package org.elasticsearch.gradle
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.JavaVersion
import org.gradle.api.Plugin
import org.gradle.api.Project
@@ -54,6 +55,11 @@ class BuildPlugin implements Plugin<Project> {

@Override
void apply(Project project) {
if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) {
throw new InvalidUserDataException('elasticsearch.standalone-test, '
+ 'elasticsearch.standalone-rest-test, and elasticsearch.build '
+ 'are mutually exclusive')
}
project.pluginManager.apply('java')
project.pluginManager.apply('carrotsearch.randomized-testing')
// these plugins add lots of info to our jars
@@ -30,6 +30,7 @@ public class DocsTestPlugin extends RestTestPlugin {

@Override
public void apply(Project project) {
project.pluginManager.apply('elasticsearch.standalone-rest-test')
super.apply(project)
Map<String, String> defaultSubstitutions = [
/* These match up with the asciidoc syntax for substitutions but
@@ -72,10 +72,12 @@ class ClusterConfiguration {
boolean useMinimumMasterNodes = true

@Input
String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
String jvmArgs = "-ea" +
" " + "-Xms" + System.getProperty('tests.heap.size', '512m') +
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
" " + System.getProperty('tests.jvm.argline', '')

/**
* A closure to call which returns the unicast host to connect to for cluster formation.
*
@@ -39,6 +39,7 @@ import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec

import java.nio.file.Paths
import java.util.concurrent.TimeUnit

/**
* A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished.
@@ -91,6 +92,8 @@ class ClusterFormationTasks {
configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(),
project.configurations.elasticsearchBwcPlugins, config.bwcVersion)
}
project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
project.configurations.elasticsearchBwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
}
for (int i = 0; i < config.numNodes; i++) {
// we start N nodes and out of these N nodes there might be M bwc nodes.
@@ -18,15 +18,29 @@
*/
package org.elasticsearch.gradle.test

import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Plugin
import org.gradle.api.Project

/** A plugin to add rest integration tests. Used for qa projects. */
/**
* Adds support for starting an Elasticsearch cluster before running integration
* tests. Used in conjunction with {@link StandaloneRestTestPlugin} for qa
* projects and in conjunction with {@link BuildPlugin} for testing the rest
* client.
*/
public class RestTestPlugin implements Plugin<Project> {
List REQUIRED_PLUGINS = [
'elasticsearch.build',
'elasticsearch.standalone-rest-test']

@Override
public void apply(Project project) {
project.pluginManager.apply(StandaloneTestBasePlugin)
if (false == REQUIRED_PLUGINS.any {project.pluginManager.hasPlugin(it)}) {
throw new InvalidUserDataException('elasticsearch.rest-test '
+ 'requires either elasticsearch.build or '
+ 'elasticsearch.standalone-rest-test')
}

RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
@@ -24,15 +24,26 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.plugins.JavaBasePlugin

/** Configures the build to have a rest integration test. */
public class StandaloneTestBasePlugin implements Plugin<Project> {
/**
* Configures the build to compile tests against Elasticsearch's test framework
* and run REST tests. Use BuildPlugin if you want to build main code as well
* as tests.
*/
public class StandaloneRestTestPlugin implements Plugin<Project> {

@Override
public void apply(Project project) {
if (project.pluginManager.hasPlugin('elasticsearch.build')) {
throw new InvalidUserDataException('elasticsearch.standalone-test, '
+ 'elasticsearch.standalone-rest-test, and elasticsearch.build are '
+ 'mutually exclusive')
}
project.pluginManager.apply(JavaBasePlugin)
project.pluginManager.apply(RandomizedTestingPlugin)
@@ -25,12 +25,15 @@ import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin

/** A plugin to add tests only. Used for QA tests that run arbitrary unit tests. */
/**
* Configures the build to compile against Elasticsearch's test framework and
* run integration and unit tests. Use BuildPlugin if you want to build main
* code as well as tests. */
public class StandaloneTestPlugin implements Plugin<Project> {

@Override
public void apply(Project project) {
project.pluginManager.apply(StandaloneTestBasePlugin)
project.pluginManager.apply(StandaloneRestTestPlugin)

Map testOptions = [
name: 'test',
@@ -0,0 +1,20 @@
#
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

implementation-class=org.elasticsearch.gradle.test.StandaloneRestTestPlugin
@@ -10,9 +10,6 @@
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessLexer\.java" checks="." />
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />

<!-- ThrowableProxy is a forked copy from Log4j to hack around a bug; this can be removed when the hack is removed -->
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]logging[/\\]log4j[/\\]core[/\\]impl[/\\]ThrowableProxy.java" checks="RegexpSinglelineJava" />

<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when the
files start to pass. -->
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQuery.java" checks="LineLength" />
@@ -352,12 +349,10 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]get[/\\]ShardGetService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentFieldMappers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentMapperParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldTypeLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MappedFieldType.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
@@ -391,7 +386,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]InnerHitsQueryParserHelper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]QueryParsers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]MatchQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]MultiMatchQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]geo[/\\]IndexedGeoBoundingBoxQuery.java" checks="LineLength" />
@@ -1001,7 +995,6 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]AssertingSearcher.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockEngineSupport.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliTool.java" checks="LineLength" />
@@ -1,5 +1,5 @@
elasticsearch = 6.0.0-alpha1
lucene = 6.4.0-snapshot-ec38570
lucene = 6.4.0-snapshot-084f7a0

# optional dependencies
spatial4j = 0.6
@@ -20,5 +20,6 @@ commonslogging = 1.1.3
commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
mocksocket = 1.1
# benchmark dependencies
jmh = 1.17.3
@@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.rest-test'

group = 'org.elasticsearch.client'

dependencies {
compile "org.elasticsearch:elasticsearch:${version}"
compile "org.elasticsearch.client:rest:${version}"

testCompile "org.elasticsearch.client:test:${version}"
testCompile "org.elasticsearch.test:framework:${version}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
}

dependencyLicenses {
// Don't check licenses for dependencies that are part of the elasticsearch project
// But any other dependency should have its license/notice/sha1
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false
}
}
@@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;

import java.io.IOException;
import java.util.Objects;

/**
* High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses.
* The provided {@link RestClient} is externally built and closed.
*/
public final class RestHighLevelClient {

private static final Log logger = LogFactory.getLog(RestHighLevelClient.class);

private final RestClient client;

public RestHighLevelClient(RestClient client) {
this.client = Objects.requireNonNull(client);
}

public boolean ping(Header... headers) {
try {
client.performRequest("HEAD", "/", headers);
return true;
} catch(IOException exception) {
return false;
}
}

}
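The new high level client only delegates to the low level RestClient it wraps and never closes it. A minimal usage sketch of the ping API introduced above (the host, port and builder call are illustrative assumptions, not part of this diff):

import org.apache.http.HttpHost;
import java.io.IOException;

public class PingExample {
    public static void main(String[] args) throws IOException {
        // The low level client is built and closed by the caller; the high level client only wraps it.
        RestClient lowLevelClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build();
        RestHighLevelClient client = new RestHighLevelClient(lowLevelClient);

        // ping() issues HEAD / and turns an IOException into false rather than propagating it.
        System.out.println("cluster reachable: " + client.ping());

        lowLevelClient.close();
    }
}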
@@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.AfterClass;
import org.junit.Before;

import java.io.IOException;

public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {

private static RestHighLevelClient restHighLevelClient;

@Before
public void initHighLevelClient() throws IOException {
super.initClient();
if (restHighLevelClient == null) {
restHighLevelClient = new RestHighLevelClient(client());
}
}

@AfterClass
public static void cleanupClient() throws IOException {
restHighLevelClient = null;
}

protected static RestHighLevelClient highLevelClient() {
return restHighLevelClient;
}
}
@@ -17,18 +17,11 @@
* under the License.
*/

package org.elasticsearch.common.xcontent;
package org.elasticsearch.client;

import org.elasticsearch.common.ParseFieldMatcher;
public class MainActionIT extends ESRestHighLevelClientTestCase {

import java.io.IOException;

/**
* Indicates that the class supports XContent deserialization.
*/
public interface FromXContentBuilder<T> {
/**
* Parses an object with the type T from parser
*/
T fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException;
public void testPing() {
assertTrue(highLevelClient().ping());
}
}
@@ -0,0 +1,87 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.apache.http.Header;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import org.mockito.ArgumentMatcher;
import org.mockito.internal.matchers.ArrayEquals;
import org.mockito.internal.matchers.VarargMatcher;

import java.io.IOException;
import java.net.SocketTimeoutException;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class RestHighLevelClientTests extends ESTestCase {

private RestClient restClient;
private RestHighLevelClient restHighLevelClient;

@Before
public void initClient() throws IOException {
restClient = mock(RestClient.class);
restHighLevelClient = new RestHighLevelClient(restClient);
}

public void testPing() throws IOException {
assertTrue(restHighLevelClient.ping());
verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher()));
}

public void testPingFailure() throws IOException {
when(restClient.performRequest(any(), any())).thenThrow(new IllegalStateException());
expectThrows(IllegalStateException.class, () -> restHighLevelClient.ping());
}

public void testPingFailed() throws IOException {
when(restClient.performRequest(any(), any())).thenThrow(new SocketTimeoutException());
assertFalse(restHighLevelClient.ping());
}

public void testPingWithHeaders() throws IOException {
Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
assertTrue(restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher(headers)));
}

private class HeadersVarargMatcher extends ArgumentMatcher<Header[]> implements VarargMatcher {
private Header[] expectedHeaders;

HeadersVarargMatcher(Header... expectedHeaders) {
this.expectedHeaders = expectedHeaders;
}

@Override
public boolean matches(Object varargArgument) {
if (varargArgument instanceof Header[]) {
Header[] actualHeaders = (Header[]) varargArgument;
return new ArrayEquals(expectedHeaders).matches(actualHeaders);
}
return false;
}
}
}
@@ -43,6 +43,7 @@ dependencies {
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
testCompile "org.elasticsearch:securemock:${versions.securemock}"
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
signature "org.codehaus.mojo.signature:java17:1.0@signature"
}
@@ -24,6 +24,7 @@ import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.HttpHost;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -80,7 +81,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
}

private static HttpServer createHttpServer() throws Exception {
HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
httpServer.start();
//returns a different status code depending on the path
for (int statusCode : getAllStatusCodes()) {
@@ -29,6 +29,7 @@ import org.apache.http.HttpHost;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -39,7 +40,6 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -49,7 +49,6 @@ import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
@@ -76,8 +75,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
}

httpServer = createHttpServer();
int numHeaders = randomIntBetween(0, 5);
defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default");
RestClientBuilder restClientBuilder = RestClient.builder(
new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())).setDefaultHeaders(defaultHeaders);
if (pathPrefix.length() > 0) {
@@ -87,7 +85,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
}

private static HttpServer createHttpServer() throws Exception {
HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
httpServer.start();
//returns a different status code depending on the path
for (int statusCode : getAllStatusCodes()) {
@@ -150,17 +148,11 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
if (method.equals("HEAD") == false) {
standardHeaders.add("Content-length");
}

final int numHeaders = randomIntBetween(1, 5);
final Header[] headers = generateHeaders("Header", "Header-array", numHeaders);
final Map<String, List<String>> expectedHeaders = new HashMap<>();

addHeaders(expectedHeaders, defaultHeaders, headers);

final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
final int statusCode = randomStatusCode(getRandom());
Response esResponse;
try {
esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), headers);
esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), requestHeaders);
} catch(ResponseException e) {
esResponse = e.getResponse();
}
@@ -168,24 +160,13 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
assertEquals(method, esResponse.getRequestLine().getMethod());
assertEquals(statusCode, esResponse.getStatusLine().getStatusCode());
assertEquals((pathPrefix.length() > 0 ? pathPrefix : "") + "/" + statusCode, esResponse.getRequestLine().getUri());

assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), standardHeaders);
for (final Header responseHeader : esResponse.getHeaders()) {
final String name = responseHeader.getName();
final String value = responseHeader.getValue();
if (name.startsWith("Header")) {
final List<String> values = expectedHeaders.get(name);
assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));

// we've collected them all
if (values.isEmpty()) {
expectedHeaders.remove(name);
}
} else {
String name = responseHeader.getName();
if (name.startsWith("Header") == false) {
assertTrue("unknown header was returned " + name, standardHeaders.remove(name));
}
}
assertTrue("some headers that were sent weren't returned: " + expectedHeaders, expectedHeaders.isEmpty());
assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty());
}
}
@@ -56,7 +56,6 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Future;
@@ -70,7 +69,6 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -131,9 +129,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
});

int numHeaders = randomIntBetween(0, 3);
defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default");
httpHost = new HttpHost("localhost", 9200);
failureListener = new HostsTrackingFailureListener();
restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener);
@@ -339,33 +335,16 @@ public class RestClientSingleHostTests extends RestClientTestCase {
*/
public void testHeaders() throws IOException {
for (String method : getHttpMethods()) {
final int numHeaders = randomIntBetween(1, 5);
final Header[] headers = generateHeaders("Header", null, numHeaders);
final Map<String, List<String>> expectedHeaders = new HashMap<>();

addHeaders(expectedHeaders, defaultHeaders, headers);

final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
final int statusCode = randomStatusCode(getRandom());
Response esResponse;
try {
esResponse = restClient.performRequest(method, "/" + statusCode, headers);
esResponse = restClient.performRequest(method, "/" + statusCode, requestHeaders);
} catch(ResponseException e) {
esResponse = e.getResponse();
}
assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
for (Header responseHeader : esResponse.getHeaders()) {
final String name = responseHeader.getName();
final String value = responseHeader.getValue();
final List<String> values = expectedHeaders.get(name);
assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));

// we've collected them all
if (values.isEmpty()) {
expectedHeaders.remove(name);
}
}
assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty());
assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), Collections.<String>emptySet());
}
}
@@ -424,10 +403,9 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}

Header[] headers = new Header[0];
final int numHeaders = randomIntBetween(1, 5);
final Set<String> uniqueNames = new HashSet<>(numHeaders);
final Set<String> uniqueNames = new HashSet<>();
if (randomBoolean()) {
headers = generateHeaders("Header", "Header-array", numHeaders);
headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
for (Header header : headers) {
request.addHeader(header);
uniqueNames.add(header.getName());
@@ -43,6 +43,7 @@ dependencies {
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
testCompile "org.elasticsearch:securemock:${versions.securemock}"
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
signature "org.codehaus.mojo.signature:java17:1.0@signature"
}
@@ -35,6 +35,7 @@ import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.After;
import org.junit.Before;
@@ -141,7 +142,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}

private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException {
HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse));
return httpServer;
}
@@ -30,16 +30,19 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

@TestMethodProviders({
JUnit3MethodProvider.class
})
@@ -53,70 +56,56 @@ import java.util.Set;
public abstract class RestClientTestCase extends RandomizedTest {

/**
* Create the specified number of {@link Header}s.
* <p>
* Generated header names will be the {@code baseName} plus its index or, rarely, the {@code arrayName} if it's supplied.
* Assert that the actual headers are the expected ones given the original default and request headers. Some headers can be ignored,
* for instance in case the http client is adding its own automatically.
*
* @param baseName The base name to use for all headers.
* @param arrayName The optional ({@code null}able) array name to use randomly.
* @param headers The number of headers to create.
* @return Never {@code null}.
* @param defaultHeaders the default headers set to the REST client instance
* @param requestHeaders the request headers sent with a particular request
* @param actualHeaders the actual headers as a result of the provided default and request headers
* @param ignoreHeaders header keys to be ignored as they are not part of default nor request headers, yet they
* will be part of the actual ones
*/
protected static Header[] generateHeaders(final String baseName, final String arrayName, final int headers) {
final Header[] generated = new Header[headers];
for (int i = 0; i < headers; i++) {
String headerName = baseName + i;
if (arrayName != null && rarely()) {
headerName = arrayName;
protected static void assertHeaders(final Header[] defaultHeaders, final Header[] requestHeaders,
final Header[] actualHeaders, final Set<String> ignoreHeaders) {
final Map<String, List<String>> expectedHeaders = new HashMap<>();
final Set<String> requestHeaderKeys = new HashSet<>();
for (final Header header : requestHeaders) {
final String name = header.getName();
addValueToListEntry(expectedHeaders, name, header.getValue());
requestHeaderKeys.add(name);
}
for (final Header defaultHeader : defaultHeaders) {
final String name = defaultHeader.getName();
if (requestHeaderKeys.contains(name) == false) {
addValueToListEntry(expectedHeaders, name, defaultHeader.getValue());
}
}
Set<String> actualIgnoredHeaders = new HashSet<>();
for (Header responseHeader : actualHeaders) {
final String name = responseHeader.getName();
if (ignoreHeaders.contains(name)) {
expectedHeaders.remove(name);
actualIgnoredHeaders.add(name);
continue;
}
final String value = responseHeader.getValue();
final List<String> values = expectedHeaders.get(name);
assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));
if (values.isEmpty()) {
expectedHeaders.remove(name);
}
}
assertEquals("some headers meant to be ignored were not part of the actual headers", ignoreHeaders, actualIgnoredHeaders);
assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty());
}

generated[i] = new BasicHeader(headerName, randomAsciiOfLengthBetween(3, 10));
}
return generated;
}

/**
* Create a new {@link List} within the {@code map} if none exists for {@code name} or append to the existing list.
*
* @param map The map to manipulate.
* @param name The name to create/append the list for.
* @param value The value to add.
*/
private static void createOrAppendList(final Map<String, List<String>> map, final String name, final String value) {
private static void addValueToListEntry(final Map<String, List<String>> map, final String name, final String value) {
List<String> values = map.get(name);

if (values == null) {
values = new ArrayList<>();
map.put(name, values);
}

values.add(value);
}

/**
* Add the {@code headers} to the {@code map} so that related tests can more easily assert that they exist.
* <p>
* If both the {@code defaultHeaders} and {@code headers} contain the same {@link Header}, based on its
* {@linkplain Header#getName() name}, then this will only use the {@code Header}(s) from {@code headers}.
*
* @param map The map to build with name/value(s) pairs.
* @param defaultHeaders The headers to add to the map representing default headers.
* @param headers The headers to add to the map representing request-level headers.
* @see #createOrAppendList(Map, String, String)
*/
protected static void addHeaders(final Map<String, List<String>> map, final Header[] defaultHeaders, final Header[] headers) {
final Set<String> uniqueHeaders = new HashSet<>();
for (final Header header : headers) {
final String name = header.getName();
createOrAppendList(map, name, header.getValue());
uniqueHeaders.add(name);
}
for (final Header defaultHeader : defaultHeaders) {
final String name = defaultHeader.getName();
if (uniqueHeaders.contains(name) == false) {
createOrAppendList(map, name, defaultHeader.getValue());
}
}
}

}
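The new assertHeaders helper encodes the precedence rule that a request header with a given name replaces the default header of the same name rather than being added next to it. A hedged sketch of a passing call (the header names and values are made up purely for illustration; the sketch extends RestClientTestCase so the protected helper is reachable):

import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

import java.util.Collections;

public class AssertHeadersExampleTests extends RestClientTestCase {

    public void testRequestHeaderOverridesDefault() {
        Header[] defaultHeaders = { new BasicHeader("Header-default", "fallback") };
        Header[] requestHeaders = { new BasicHeader("Header-default", "override"), new BasicHeader("Header0", "value") };
        // Expected actual headers: the request value wins for Header-default, Header0 is carried through, nothing ignored.
        Header[] actualHeaders  = { new BasicHeader("Header-default", "override"), new BasicHeader("Header0", "value") };
        assertHeaders(defaultHeaders, requestHeaders, actualHeaders, Collections.<String>emptySet());
    }
}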
@@ -19,7 +19,11 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

import java.util.ArrayList;
import java.util.Arrays;
@@ -81,4 +85,23 @@ final class RestClientTestUtil {
static List<Integer> getAllStatusCodes() {
return ALL_STATUS_CODES;
}

/**
* Create a random number of {@link Header}s.
* Generated header names will either be the {@code baseName} plus its index, or exactly the provided {@code baseName}, so that
* we also test support for multiple headers with the same key and different values.
*/
static Header[] randomHeaders(Random random, final String baseName) {
int numHeaders = RandomNumbers.randomIntBetween(random, 0, 5);
final Header[] headers = new Header[numHeaders];
for (int i = 0; i < numHeaders; i++) {
String headerName = baseName;
//randomly exercise the code path that supports multiple headers with same key
if (random.nextBoolean()) {
headerName = headerName + i;
}
headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10));
}
return headers;
}
}
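Because randomHeaders deliberately reuses the bare baseName for some entries, consumers have to tolerate duplicate header names. A small, hedged sketch of grouping the generated headers by name before asserting on them (the wrapper class is hypothetical; the tests above simply pass the array straight through):

import org.apache.http.Header;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

class RandomHeadersUsage {
    // Group the randomly generated headers by name; duplicate names are expected by design.
    static Map<String, List<String>> byName(Random random) {
        Map<String, List<String>> grouped = new HashMap<>();
        for (Header header : RestClientTestUtil.randomHeaders(random, "Header")) {
            grouped.computeIfAbsent(header.getName(), name -> new ArrayList<>()).add(header.getValue());
        }
        return grouped;
    }
}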
@@ -37,13 +37,12 @@ import java.util.Collections;
import java.util.concurrent.TimeUnit;

/**
* A builder to create an instance of {@link TransportClient}
* This class pre-installs the
* A builder to create an instance of {@link TransportClient}. This class pre-installs the
* {@link Netty4Plugin},
* {@link ReindexPlugin},
* {@link PercolatorPlugin},
* and {@link MustachePlugin}
* for the client. These plugins are all elasticsearch core modules required.
* plugins for the client. These plugins are all the required modules for Elasticsearch.
*/
@SuppressWarnings({"unchecked","varargs"})
public class PreBuiltTransportClient extends TransportClient {
@@ -63,6 +62,8 @@ public class PreBuiltTransportClient extends TransportClient {
final String noUnsafe = System.getProperty(noUnsafeKey);
if (noUnsafe == null) {
// disable Netty from using unsafe
// while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or
// the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here
System.setProperty(noUnsafeKey, Boolean.toString(true));
}
@@ -70,6 +71,8 @@ public class PreBuiltTransportClient extends TransportClient {
final String noKeySetOptimization = System.getProperty(noKeySetOptimizationKey);
if (noKeySetOptimization == null) {
// disable Netty from replacing the selector key set
// while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or
// the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here
System.setProperty(noKeySetOptimizationKey, Boolean.toString(true));
}
}
@@ -82,9 +85,9 @@ public class PreBuiltTransportClient extends TransportClient {
PercolatorPlugin.class,
MustachePlugin.class));

/**
* Creates a new transport client with pre-installed plugins.
*
* @param settings the settings passed to this transport client
* @param plugins an optional array of additional plugins to run with this client
*/
@@ -93,9 +96,9 @@ public class PreBuiltTransportClient extends TransportClient {
this(settings, Arrays.asList(plugins));
}

/**
* Creates a new transport client with pre-installed plugins.
*
* @param settings the settings passed to this transport client
* @param plugins a collection of additional plugins to run with this client
*/
@@ -105,11 +108,14 @@ public class PreBuiltTransportClient extends TransportClient {

/**
* Creates a new transport client with pre-installed plugins.
*
* @param settings the settings passed to this transport client
* @param plugins a collection of additional plugins to run with this client
* @param hostFailureListener a failure listener that is invoked if a node is disconnected. This can be <code>null</code>
* @param hostFailureListener a failure listener that is invoked if a node is disconnected; this can be <code>null</code>
*/
public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins,
public PreBuiltTransportClient(
Settings settings,
Collection<Class<? extends Plugin>> plugins,
HostFailureListener hostFailureListener) {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS), hostFailureListener);
}
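The constructor changes above are formatting only; plugins passed in are still appended to the pre-installed set. A minimal usage sketch of the client this class builds (cluster name, host and port are assumptions for illustration; addTransportAddress comes from TransportClient itself):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

import java.net.InetAddress;

public class TransportClientExample {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder().put("cluster.name", "my-cluster").build();
        // The varargs constructor is used here; extra plugins could be passed after the settings.
        try (PreBuiltTransportClient client = new PreBuiltTransportClient(settings)) {
            client.addTransportAddress(
                    new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
            // use the client here, e.g. client.admin().cluster().prepareHealth().get()
        }
    }
}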
@@ -0,0 +1 @@
ad1553dd2eed3a7cd5778bc7520821ac926b56df
@@ -1 +0,0 @@
770114e0188dd8b4f30e5878b4f6c8677cecf1be
@@ -0,0 +1 @@
dde630b1d09ff928a1f358951747cfad5c46b334
@@ -1 +0,0 @@
f4eb0257e8419beaa9f84da6a51375fda4e491f2
@@ -0,0 +1 @@
1789bff323a0c013b126f4e51f1f269ebc631277
@@ -1 +0,0 @@
c80ad16cd36c41012abb8a8bb1c7328c6d680b4a
@@ -0,0 +1 @@
8cb17916d0e63705f1f715fe0d03ed32916a077a
@@ -1 +0,0 @@
070d4e370f4fe0b8a04b2bce5b4381201b0c783f
@@ -0,0 +1 @@
79d6ba8fa629a52ad3eb829d085836f5fd2f7a87
@@ -1 +0,0 @@
131d9a86f5943675493a85def0e692842f396458
@@ -0,0 +1 @@
19794d8f15402c991d9533bfcd67e2e7a34677ef
@@ -1 +0,0 @@
385b2202036b50a764e4d2b032e21496b74a1c8e
@@ -0,0 +1 @@
33e42d3019e072752258bd778912c8d4365470a1
@@ -1 +0,0 @@
e8742a44ef4849a17d5e59ef36e9a52a8f2370c2
@@ -0,0 +1 @@
a1b3271b3800da349c8b98f7b1a25b2b6192252a
@@ -1 +0,0 @@
7ce2e4948fb66393a34f4200a6131cfde43e47bd
@@ -0,0 +1 @@
792716d805fcc5091931874c2f2f86f35da8b401
@@ -1 +0,0 @@
6c1c385a597ce797b0049d9b2281b09593e1488a
@@ -0,0 +1 @@
c3f8bbc6ebe8d31da41fcdb1fa73f13d8170ee62
@@ -1 +0,0 @@
fafaa22906c067e6894f9f2b18ad03ded98e2f38
@@ -0,0 +1 @@
263901a19686c6cce7dd5c32a4934c42c62454dc
@@ -1 +0,0 @@
19c64a84617f42bb4c11b1e266df4009cd37fdd0
@@ -0,0 +1 @@
85426164fcc264a7e3bacc1a70602513540a261a
@@ -1 +0,0 @@
bc8613fb61c0ae95dd3680b0f65e3380c3fd0d6c
@@ -0,0 +1 @@
332cbfaa6b1ee0bf4d820018872988e15cd413d2
@@ -1 +0,0 @@
0fa2c3e722294e863f3c70a15e97a18397391fb4
@@ -0,0 +1 @@
3fe3e902b971f4aa2b4a3a417ba5dcf83e968428
@@ -1 +0,0 @@
db74c6313965ffdd10d9b19be2eed4ae2c76d2e3
@@ -0,0 +1 @@
c4863fe45853163abfbe5c8b8bd7bdcf9a9c7b40
@@ -1 +0,0 @@
b85ae1121b5fd56df985615a3cdd7b3879e9b92d
@@ -1,291 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.lucene.analysis.synonym;

import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.FiniteStringsIterator;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.Transition;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
* Creates a list of {@link TokenStream} where each stream is the tokens that make up a finite string in graph token stream. To do this,
* the graph token stream is converted to an {@link Automaton} and from there we use a {@link FiniteStringsIterator} to collect the various
* token streams for each finite string.
*/
public class GraphTokenStreamFiniteStrings {
private final Automaton.Builder builder;
Automaton det;
private final Map<BytesRef, Integer> termToID = new HashMap<>();
private final Map<Integer, BytesRef> idToTerm = new HashMap<>();
private int anyTermID = -1;

public GraphTokenStreamFiniteStrings() {
this.builder = new Automaton.Builder();
}

private static class BytesRefArrayTokenStream extends TokenStream {
private final BytesTermAttribute termAtt = addAttribute(BytesTermAttribute.class);
private final BytesRef[] terms;
private int offset;

BytesRefArrayTokenStream(BytesRef[] terms) {
this.terms = terms;
offset = 0;
}

@Override
public boolean incrementToken() throws IOException {
if (offset < terms.length) {
clearAttributes();
termAtt.setBytesRef(terms[offset]);
offset = offset + 1;
return true;
}

return false;
}
}

/**
* Gets
*/
public List<TokenStream> getTokenStreams(final TokenStream in) throws IOException {
// build automation
build(in);

List<TokenStream> tokenStreams = new ArrayList<>();
final FiniteStringsIterator finiteStrings = new FiniteStringsIterator(det);
for (IntsRef string; (string = finiteStrings.next()) != null; ) {
final BytesRef[] tokens = new BytesRef[string.length];
for (int idx = string.offset, len = string.offset + string.length; idx < len; idx++) {
tokens[idx - string.offset] = idToTerm.get(string.ints[idx]);
}

tokenStreams.add(new BytesRefArrayTokenStream(tokens));
}

return tokenStreams;
}

private void build(final TokenStream in) throws IOException {
if (det != null) {
throw new IllegalStateException("Automation already built");
}

final TermToBytesRefAttribute termBytesAtt = in.addAttribute(TermToBytesRefAttribute.class);
final PositionIncrementAttribute posIncAtt = in.addAttribute(PositionIncrementAttribute.class);
final PositionLengthAttribute posLengthAtt = in.addAttribute(PositionLengthAttribute.class);
final OffsetAttribute offsetAtt = in.addAttribute(OffsetAttribute.class);

in.reset();

int pos = -1;
int lastPos = 0;
int maxOffset = 0;
int maxPos = -1;
int state = -1;
while (in.incrementToken()) {
int posInc = posIncAtt.getPositionIncrement();
assert pos > -1 || posInc > 0;

if (posInc > 1) {
throw new IllegalArgumentException("cannot handle holes; to accept any term, use '*' term");
}

if (posInc > 0) {
// New node:
pos += posInc;
}

int endPos = pos + posLengthAtt.getPositionLength();
while (state < endPos) {
state = createState();
}

BytesRef term = termBytesAtt.getBytesRef();
//System.out.println(pos + "-" + endPos + ": " + term.utf8ToString() + ": posInc=" + posInc);
if (term.length == 1 && term.bytes[term.offset] == (byte) '*') {
addAnyTransition(pos, endPos);
} else {
addTransition(pos, endPos, term);
}

maxOffset = Math.max(maxOffset, offsetAtt.endOffset());
maxPos = Math.max(maxPos, endPos);
}

in.end();

// TODO: look at endOffset? ts2a did...

// TODO: this (setting "last" state as the only accept state) may be too simplistic?
setAccept(state, true);
finish();
}

/**
* Returns a new state; state 0 is always the initial state.
*/
private int createState() {
return builder.createState();
}

/**
* Marks the specified state as accept or not.
*/
private void setAccept(int state, boolean accept) {
builder.setAccept(state, accept);
}

/**
* Adds a transition to the automaton.
*/
private void addTransition(int source, int dest, String term) {
addTransition(source, dest, new BytesRef(term));
}

/**
* Adds a transition to the automaton.
*/
private void addTransition(int source, int dest, BytesRef term) {
if (term == null) {
throw new NullPointerException("term should not be null");
}
builder.addTransition(source, dest, getTermID(term));
}

/**
* Adds a transition matching any term.
*/
private void addAnyTransition(int source, int dest) {
builder.addTransition(source, dest, getTermID(null));
}

/**
* Call this once you are done adding states/transitions.
*/
private void finish() {
finish(DEFAULT_MAX_DETERMINIZED_STATES);
}

/**
* Call this once you are done adding states/transitions.
*
* @param maxDeterminizedStates Maximum number of states created when determinizing the automaton. Higher numbers allow this operation
* to consume more memory but allow more complex automatons.
*/
private void finish(int maxDeterminizedStates) {
Automaton automaton = builder.finish();

// System.out.println("before det:\n" + automaton.toDot());

Transition t = new Transition();

// TODO: should we add "eps back to initial node" for all states,
// and det that? then we don't need to revisit initial node at
// every position? but automaton could blow up? And, this makes it
// harder to skip useless positions at search time?

if (anyTermID != -1) {

// Make sure there are no leading or trailing ANY:
int count = automaton.initTransition(0, t);
for (int i = 0; i < count; i++) {
automaton.getNextTransition(t);
if (anyTermID >= t.min && anyTermID <= t.max) {
throw new IllegalStateException("automaton cannot lead with an ANY transition");
}
}

int numStates = automaton.getNumStates();
for (int i = 0; i < numStates; i++) {
count = automaton.initTransition(i, t);
for (int j = 0; j < count; j++) {
automaton.getNextTransition(t);
if (automaton.isAccept(t.dest) && anyTermID >= t.min && anyTermID <= t.max) {
throw new IllegalStateException("automaton cannot end with an ANY transition");
}
}
}

int termCount = termToID.size();

// We have to carefully translate these transitions so automaton
// realizes they also match all other terms:
Automaton newAutomaton = new Automaton();
for (int i = 0; i < numStates; i++) {
newAutomaton.createState();
newAutomaton.setAccept(i, automaton.isAccept(i));
}

for (int i = 0; i < numStates; i++) {
count = automaton.initTransition(i, t);
for (int j = 0; j < count; j++) {
automaton.getNextTransition(t);
int min, max;
if (t.min <= anyTermID && anyTermID <= t.max) {
// Match any term
min = 0;
max = termCount - 1;
} else {
min = t.min;
max = t.max;
}
newAutomaton.addTransition(t.source, t.dest, min, max);
}
}
newAutomaton.finishState();
automaton = newAutomaton;
}

det = Operations.removeDeadStates(Operations.determinize(automaton, maxDeterminizedStates));
}

private int getTermID(BytesRef term) {
Integer id = termToID.get(term);
if (id == null) {
id = termToID.size();
if (term != null) {
term = BytesRef.deepCopyOf(term);
}
termToID.put(term, id);
idToTerm.put(id, term);
if (term == null) {
anyTermID = id;
}
}

return id;
}
}
@@ -1,588 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.analysis.synonym;
|
||||
|
||||
import org.apache.lucene.analysis.TokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
|
||||
import org.apache.lucene.store.ByteArrayDataInput;
|
||||
import org.apache.lucene.util.AttributeSource;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.CharsRefBuilder;
|
||||
import org.apache.lucene.util.RollingBuffer;
|
||||
import org.apache.lucene.util.fst.FST;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
||||
// TODO: maybe we should resolve token -> wordID then run
|
||||
// FST on wordIDs, for better perf?
|
||||
|
||||
// TODO: a more efficient approach would be Aho/Corasick's
|
||||
// algorithm
|
||||
// http://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm
|
||||
// It improves over the current approach here
|
||||
// because it does not fully re-start matching at every
|
||||
// token. For example if one pattern is "a b c x"
|
||||
// and another is "b c d" and the input is "a b c d", on
|
||||
// trying to parse "a b c x" but failing when you got to x,
|
||||
// rather than starting over again you really should
|
||||
// immediately recognize that "b c d" matches at the next
|
||||
// input. I suspect this won't matter that much in
|
||||
// practice, but it's possible on some set of synonyms it
|
||||
// will. We'd have to modify Aho/Corasick to enforce our
|
||||
// conflict resolving (eg greedy matching) because that algo
|
||||
// finds all matches. This really amounts to adding a .*
|
||||
// closure to the FST and then determinizing it.
|
||||
//
|
||||
// Another possible solution is described at http://www.cis.uni-muenchen.de/people/Schulz/Pub/dictle5.ps
|
||||
|
||||
/**
|
||||
* Applies single- or multi-token synonyms from a {@link SynonymMap}
|
||||
* to an incoming {@link TokenStream}, producing a fully correct graph
|
||||
* output. This is a replacement for {@link SynonymFilter}, which produces
|
||||
* incorrect graphs for multi-token synonyms.
|
||||
*
|
||||
* <b>NOTE</b>: this cannot consume an incoming graph; results will
|
||||
* be undefined.
|
||||
*/
|
||||
public final class SynonymGraphFilter extends TokenFilter {
|
||||
|
||||
public static final String TYPE_SYNONYM = "SYNONYM";
|
||||
public static final int GRAPH_FLAG = 8;
|
||||
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||
private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
|
||||
private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
|
||||
private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
|
||||
|
||||
private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
|
||||
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
|
||||
|
||||
private final SynonymMap synonyms;
|
||||
private final boolean ignoreCase;
|
||||
|
||||
private final FST<BytesRef> fst;
|
||||
|
||||
private final FST.BytesReader fstReader;
|
||||
private final FST.Arc<BytesRef> scratchArc;
|
||||
private final ByteArrayDataInput bytesReader = new ByteArrayDataInput();
|
||||
private final BytesRef scratchBytes = new BytesRef();
|
||||
private final CharsRefBuilder scratchChars = new CharsRefBuilder();
|
||||
private final LinkedList<BufferedOutputToken> outputBuffer = new LinkedList<>();
|
||||
|
||||
private int nextNodeOut;
|
||||
private int lastNodeOut;
|
||||
private int maxLookaheadUsed;
|
||||
|
||||
// For testing:
|
||||
private int captureCount;
|
||||
|
||||
private boolean liveToken;
|
||||
|
||||
// Start/end offset of the current match:
|
||||
private int matchStartOffset;
|
||||
private int matchEndOffset;
|
||||
|
||||
// True once the input TokenStream is exhausted:
|
||||
private boolean finished;
|
||||
|
||||
private int lookaheadNextRead;
|
||||
private int lookaheadNextWrite;
|
||||
|
||||
private RollingBuffer<BufferedInputToken> lookahead = new RollingBuffer<BufferedInputToken>() {
|
||||
@Override
|
||||
protected BufferedInputToken newInstance() {
|
||||
return new BufferedInputToken();
|
||||
}
|
||||
};
|
||||
|
||||
static class BufferedInputToken implements RollingBuffer.Resettable {
|
||||
final CharsRefBuilder term = new CharsRefBuilder();
|
||||
AttributeSource.State state;
|
||||
int startOffset = -1;
|
||||
int endOffset = -1;
|
||||
|
||||
@Override
|
||||
public void reset() {
|
||||
state = null;
|
||||
term.clear();
|
||||
|
||||
// Intentionally invalid to ferret out bugs:
|
||||
startOffset = -1;
|
||||
endOffset = -1;
|
||||
}
|
||||
}
|
||||
|
||||
static class BufferedOutputToken {
|
||||
final String term;
|
||||
|
||||
// Non-null if this was an incoming token:
|
||||
final State state;
|
||||
|
||||
final int startNode;
|
||||
final int endNode;
|
||||
|
||||
public BufferedOutputToken(State state, String term, int startNode, int endNode) {
|
||||
this.state = state;
|
||||
this.term = term;
|
||||
this.startNode = startNode;
|
||||
this.endNode = endNode;
|
||||
}
|
||||
}
|
||||
|
||||
public SynonymGraphFilter(TokenStream input, SynonymMap synonyms, boolean ignoreCase) {
|
||||
super(input);
|
||||
this.synonyms = synonyms;
|
||||
this.fst = synonyms.fst;
|
||||
if (fst == null) {
|
||||
throw new IllegalArgumentException("fst must be non-null");
|
||||
}
|
||||
this.fstReader = fst.getBytesReader();
|
||||
scratchArc = new FST.Arc<>();
|
||||
this.ignoreCase = ignoreCase;
|
||||
}
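For orientation, a minimal usage sketch of this filter's API; it is not part of this change, and the synonym rule, sample text, and WhitespaceTokenizer are illustrative choices:

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.util.CharsRef;

public class SynonymGraphFilterUsageSketch {
    public static void main(String[] args) throws Exception {
        // One rule: "usa" -> "united states", keeping the original token (keepOrig = true).
        SynonymMap.Builder mapBuilder = new SynonymMap.Builder(true);             // true = dedup rules
        mapBuilder.add(new CharsRef("usa"), new CharsRef("united states"), true);
        SynonymMap synonymMap = mapBuilder.build();

        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("visit the usa today"));
        try (TokenStream stream = new SynonymGraphFilter(tokenizer, synonymMap, true)) { // ignoreCase = true
            stream.reset();
            while (stream.incrementToken()) {
                // inspect CharTermAttribute / PositionLengthAttribute for the graph output
            }
            stream.end();
        }
    }
}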
|
||||
|
||||
@Override
|
||||
public boolean incrementToken() throws IOException {
|
||||
//System.out.println("\nS: incrToken lastNodeOut=" + lastNodeOut + " nextNodeOut=" + nextNodeOut);
|
||||
|
||||
assert lastNodeOut <= nextNodeOut;
|
||||
|
||||
if (outputBuffer.isEmpty() == false) {
|
||||
// We still have pending outputs from a prior synonym match:
|
||||
releaseBufferedToken();
|
||||
//System.out.println(" syn: ret buffered=" + this);
|
||||
assert liveToken == false;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Try to parse a new synonym match at the current token:
|
||||
|
||||
if (parse()) {
|
||||
// A new match was found:
|
||||
releaseBufferedToken();
|
||||
//System.out.println(" syn: after parse, ret buffered=" + this);
|
||||
assert liveToken == false;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (lookaheadNextRead == lookaheadNextWrite) {
|
||||
|
||||
// Fast path: parse pulled one token, but it didn't match
|
||||
// the start for any synonym, so we now return it "live" w/o having
|
||||
// cloned all of its atts:
|
||||
if (finished) {
|
||||
//System.out.println(" syn: ret END");
|
||||
return false;
|
||||
}
|
||||
|
||||
assert liveToken;
|
||||
liveToken = false;
|
||||
|
||||
// NOTE: no need to change posInc since it's relative, i.e. whatever
|
||||
// node our output is upto will just increase by the incoming posInc.
|
||||
// We also don't need to change posLen, but only because we cannot
|
||||
// consume a graph, so the incoming token can never span a future
|
||||
// synonym match.
|
||||
|
||||
} else {
|
||||
// We still have buffered lookahead tokens from a previous
|
||||
// parse attempt that required lookahead; just replay them now:
|
||||
//System.out.println(" restore buffer");
|
||||
assert lookaheadNextRead < lookaheadNextWrite : "read=" + lookaheadNextRead + " write=" + lookaheadNextWrite;
|
||||
BufferedInputToken token = lookahead.get(lookaheadNextRead);
|
||||
lookaheadNextRead++;
|
||||
|
||||
restoreState(token.state);
|
||||
|
||||
lookahead.freeBefore(lookaheadNextRead);
|
||||
|
||||
//System.out.println(" after restore offset=" + offsetAtt.startOffset() + "-" + offsetAtt.endOffset());
|
||||
assert liveToken == false;
|
||||
}
|
||||
|
||||
lastNodeOut += posIncrAtt.getPositionIncrement();
|
||||
nextNodeOut = lastNodeOut + posLenAtt.getPositionLength();
|
||||
|
||||
//System.out.println(" syn: ret lookahead=" + this);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private void releaseBufferedToken() throws IOException {
|
||||
//System.out.println(" releaseBufferedToken");
|
||||
|
||||
BufferedOutputToken token = outputBuffer.pollFirst();
|
||||
|
||||
if (token.state != null) {
|
||||
// This is an original input token (keepOrig=true case):
|
||||
//System.out.println(" hasState");
|
||||
restoreState(token.state);
|
||||
//System.out.println(" startOffset=" + offsetAtt.startOffset() + " endOffset=" + offsetAtt.endOffset());
|
||||
} else {
|
||||
clearAttributes();
|
||||
//System.out.println(" no state");
|
||||
termAtt.append(token.term);
|
||||
|
||||
// We better have a match already:
|
||||
assert matchStartOffset != -1;
|
||||
|
||||
offsetAtt.setOffset(matchStartOffset, matchEndOffset);
|
||||
//System.out.println(" startOffset=" + matchStartOffset + " endOffset=" + matchEndOffset);
|
||||
typeAtt.setType(TYPE_SYNONYM);
|
||||
}
|
||||
|
||||
//System.out.println(" lastNodeOut=" + lastNodeOut);
|
||||
//System.out.println(" term=" + termAtt);
|
||||
|
||||
posIncrAtt.setPositionIncrement(token.startNode - lastNodeOut);
|
||||
lastNodeOut = token.startNode;
|
||||
posLenAtt.setPositionLength(token.endNode - token.startNode);
|
||||
flagsAtt.setFlags(flagsAtt.getFlags() | GRAPH_FLAG); // set the graph flag
|
||||
}
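Downstream consumers can tell which tokens came out of a synonym expansion by checking the flag set above; a hedged sketch, where the stream variable is assumed to be a TokenStream whose chain contains this filter:

FlagsAttribute flags = stream.addAttribute(FlagsAttribute.class);
stream.reset();
while (stream.incrementToken()) {
    // GRAPH_FLAG (8) is OR'ed into the flags of every token released from a synonym match
    boolean fromSynonymGraph = (flags.getFlags() & SynonymGraphFilter.GRAPH_FLAG) != 0;
}
stream.end();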
|
||||
|
||||
/**
|
||||
* Scans the next input token(s) to see if a synonym matches. Returns true
|
||||
* if a match was found.
|
||||
*/
|
||||
private boolean parse() throws IOException {
|
||||
// System.out.println(Thread.currentThread().getName() + ": S: parse: " + System.identityHashCode(this));
|
||||
|
||||
// Holds the longest match we've seen so far:
|
||||
BytesRef matchOutput = null;
|
||||
int matchInputLength = 0;
|
||||
|
||||
BytesRef pendingOutput = fst.outputs.getNoOutput();
|
||||
fst.getFirstArc(scratchArc);
|
||||
|
||||
assert scratchArc.output == fst.outputs.getNoOutput();
|
||||
|
||||
// How many tokens in the current match
|
||||
int matchLength = 0;
|
||||
boolean doFinalCapture = false;
|
||||
|
||||
int lookaheadUpto = lookaheadNextRead;
|
||||
matchStartOffset = -1;
|
||||
|
||||
byToken:
|
||||
while (true) {
|
||||
//System.out.println(" cycle lookaheadUpto=" + lookaheadUpto + " maxPos=" + lookahead.getMaxPos());
|
||||
|
||||
// Pull next token's chars:
|
||||
final char[] buffer;
|
||||
final int bufferLen;
|
||||
final int inputEndOffset;
|
||||
|
||||
if (lookaheadUpto <= lookahead.getMaxPos()) {
|
||||
// Still in our lookahead buffer
|
||||
BufferedInputToken token = lookahead.get(lookaheadUpto);
|
||||
lookaheadUpto++;
|
||||
buffer = token.term.chars();
|
||||
bufferLen = token.term.length();
|
||||
inputEndOffset = token.endOffset;
|
||||
//System.out.println(" use buffer now max=" + lookahead.getMaxPos());
|
||||
if (matchStartOffset == -1) {
|
||||
matchStartOffset = token.startOffset;
|
||||
}
|
||||
} else {
|
||||
|
||||
// We used up our lookahead buffer of input tokens
|
||||
// -- pull next real input token:
|
||||
|
||||
assert finished || liveToken == false;
|
||||
|
||||
if (finished) {
|
||||
//System.out.println(" break: finished");
|
||||
break;
|
||||
} else if (input.incrementToken()) {
|
||||
//System.out.println(" input.incrToken");
|
||||
liveToken = true;
|
||||
buffer = termAtt.buffer();
|
||||
bufferLen = termAtt.length();
|
||||
if (matchStartOffset == -1) {
|
||||
matchStartOffset = offsetAtt.startOffset();
|
||||
}
|
||||
inputEndOffset = offsetAtt.endOffset();
|
||||
|
||||
lookaheadUpto++;
|
||||
} else {
|
||||
// No more input tokens
|
||||
finished = true;
|
||||
//System.out.println(" break: now set finished");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
matchLength++;
|
||||
//System.out.println(" cycle term=" + new String(buffer, 0, bufferLen));
|
||||
|
||||
// Run each char in this token through the FST:
|
||||
int bufUpto = 0;
|
||||
while (bufUpto < bufferLen) {
|
||||
final int codePoint = Character.codePointAt(buffer, bufUpto, bufferLen);
|
||||
if (fst.findTargetArc(ignoreCase ? Character.toLowerCase(codePoint) : codePoint, scratchArc, scratchArc, fstReader) ==
|
||||
null) {
|
||||
break byToken;
|
||||
}
|
||||
|
||||
// Accum the output
|
||||
pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output);
|
||||
bufUpto += Character.charCount(codePoint);
|
||||
}
|
||||
|
||||
assert bufUpto == bufferLen;
|
||||
|
||||
// OK, entire token matched; now see if this is a final
|
||||
// state in the FST (a match):
|
||||
if (scratchArc.isFinal()) {
|
||||
matchOutput = fst.outputs.add(pendingOutput, scratchArc.nextFinalOutput);
|
||||
matchInputLength = matchLength;
|
||||
matchEndOffset = inputEndOffset;
|
||||
//System.out.println(" ** match");
|
||||
}
|
||||
|
||||
// See if the FST can continue matching (ie, needs to
|
||||
// see the next input token):
|
||||
if (fst.findTargetArc(SynonymMap.WORD_SEPARATOR, scratchArc, scratchArc, fstReader) == null) {
|
||||
// No further rules can match here; we're done
|
||||
// searching for matching rules starting at the
|
||||
// current input position.
|
||||
break;
|
||||
} else {
|
||||
// More matching is possible -- accum the output (if
|
||||
// any) of the WORD_SEP arc:
|
||||
pendingOutput = fst.outputs.add(pendingOutput, scratchArc.output);
|
||||
doFinalCapture = true;
|
||||
if (liveToken) {
|
||||
capture();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (doFinalCapture && liveToken && finished == false) {
|
||||
// Must capture the final token if we captured any prior tokens:
|
||||
capture();
|
||||
}
|
||||
|
||||
if (matchOutput != null) {
|
||||
|
||||
if (liveToken) {
|
||||
// Single input token synonym; we must buffer it now:
|
||||
capture();
|
||||
}
|
||||
|
||||
// There is a match!
|
||||
bufferOutputTokens(matchOutput, matchInputLength);
|
||||
lookaheadNextRead += matchInputLength;
|
||||
//System.out.println(" precmatch; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos());
|
||||
lookahead.freeBefore(lookaheadNextRead);
|
||||
//System.out.println(" match; set lookaheadNextRead=" + lookaheadNextRead + " now max=" + lookahead.getMaxPos());
|
||||
return true;
|
||||
} else {
|
||||
//System.out.println(" no match; lookaheadNextRead=" + lookaheadNextRead);
|
||||
return false;
|
||||
}
|
||||
|
||||
//System.out.println(" parse done inputSkipCount=" + inputSkipCount + " nextRead=" + nextRead + " nextWrite=" + nextWrite);
|
||||
}
|
||||
|
||||
/**
|
||||
* Expands the output graph into the necessary tokens, adding
|
||||
* synonyms as side paths parallel to the input tokens, and
|
||||
* buffers them in the output token buffer.
|
||||
*/
|
||||
private void bufferOutputTokens(BytesRef bytes, int matchInputLength) {
|
||||
bytesReader.reset(bytes.bytes, bytes.offset, bytes.length);
|
||||
|
||||
final int code = bytesReader.readVInt();
|
||||
final boolean keepOrig = (code & 0x1) == 0;
|
||||
//System.out.println(" buffer: keepOrig=" + keepOrig + " matchInputLength=" + matchInputLength);
|
||||
|
||||
// How many nodes along all paths; we need this to assign the
|
||||
// node ID for the final end node where all paths merge back:
|
||||
int totalPathNodes;
|
||||
if (keepOrig) {
|
||||
assert matchInputLength > 0;
|
||||
totalPathNodes = matchInputLength - 1;
|
||||
} else {
|
||||
totalPathNodes = 0;
|
||||
}
|
||||
|
||||
// How many synonyms we will insert over this match:
|
||||
final int count = code >>> 1;
|
||||
|
||||
// TODO: we could encode this instead into the FST:
|
||||
|
||||
// 1st pass: count how many new nodes we need
|
||||
List<List<String>> paths = new ArrayList<>();
|
||||
for (int outputIDX = 0; outputIDX < count; outputIDX++) {
|
||||
int wordID = bytesReader.readVInt();
|
||||
synonyms.words.get(wordID, scratchBytes);
|
||||
scratchChars.copyUTF8Bytes(scratchBytes);
|
||||
int lastStart = 0;
|
||||
|
||||
List<String> path = new ArrayList<>();
|
||||
paths.add(path);
|
||||
int chEnd = scratchChars.length();
|
||||
for (int chUpto = 0; chUpto <= chEnd; chUpto++) {
|
||||
if (chUpto == chEnd || scratchChars.charAt(chUpto) == SynonymMap.WORD_SEPARATOR) {
|
||||
path.add(new String(scratchChars.chars(), lastStart, chUpto - lastStart));
|
||||
lastStart = 1 + chUpto;
|
||||
}
|
||||
}
|
||||
|
||||
assert path.size() > 0;
|
||||
totalPathNodes += path.size() - 1;
|
||||
}
|
||||
//System.out.println(" totalPathNodes=" + totalPathNodes);
|
||||
|
||||
// 2nd pass: buffer tokens for the graph fragment
|
||||
|
||||
// NOTE: totalPathNodes will be 0 in the case where the matched
|
||||
// input is a single token and all outputs are also a single token
|
||||
|
||||
// We "spawn" a side-path for each of the outputs for this matched
|
||||
// synonym, all ending back at this end node:
|
||||
|
||||
int startNode = nextNodeOut;
|
||||
|
||||
int endNode = startNode + totalPathNodes + 1;
|
||||
//System.out.println(" " + paths.size() + " new side-paths");
|
||||
|
||||
// First, fanout all tokens departing start node for these new side paths:
|
||||
int newNodeCount = 0;
|
||||
for (List<String> path : paths) {
|
||||
int pathEndNode;
|
||||
//System.out.println(" path size=" + path.size());
|
||||
if (path.size() == 1) {
|
||||
// Single token output, so there are no intermediate nodes:
|
||||
pathEndNode = endNode;
|
||||
} else {
|
||||
pathEndNode = nextNodeOut + newNodeCount + 1;
|
||||
newNodeCount += path.size() - 1;
|
||||
}
|
||||
outputBuffer.add(new BufferedOutputToken(null, path.get(0), startNode, pathEndNode));
|
||||
}
|
||||
|
||||
// We must do the original tokens last, else the offsets "go backwards":
|
||||
if (keepOrig) {
|
||||
BufferedInputToken token = lookahead.get(lookaheadNextRead);
|
||||
int inputEndNode;
|
||||
if (matchInputLength == 1) {
|
||||
// Single token matched input, so there are no intermediate nodes:
|
||||
inputEndNode = endNode;
|
||||
} else {
|
||||
inputEndNode = nextNodeOut + newNodeCount + 1;
|
||||
}
|
||||
|
||||
//System.out.println(" keepOrig first token: " + token.term);
|
||||
|
||||
outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), startNode, inputEndNode));
|
||||
}
|
||||
|
||||
nextNodeOut = endNode;
|
||||
|
||||
// Do full side-path for each syn output:
|
||||
for (int pathID = 0; pathID < paths.size(); pathID++) {
|
||||
List<String> path = paths.get(pathID);
|
||||
if (path.size() > 1) {
|
||||
int lastNode = outputBuffer.get(pathID).endNode;
|
||||
for (int i = 1; i < path.size() - 1; i++) {
|
||||
outputBuffer.add(new BufferedOutputToken(null, path.get(i), lastNode, lastNode + 1));
|
||||
lastNode++;
|
||||
}
|
||||
outputBuffer.add(new BufferedOutputToken(null, path.get(path.size() - 1), lastNode, endNode));
|
||||
}
|
||||
}
|
||||
|
||||
if (keepOrig && matchInputLength > 1) {
|
||||
// Do full "side path" with the original tokens:
|
||||
int lastNode = outputBuffer.get(paths.size()).endNode;
|
||||
for (int i = 1; i < matchInputLength - 1; i++) {
|
||||
BufferedInputToken token = lookahead.get(lookaheadNextRead + i);
|
||||
outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, lastNode + 1));
|
||||
lastNode++;
|
||||
}
|
||||
BufferedInputToken token = lookahead.get(lookaheadNextRead + matchInputLength - 1);
|
||||
outputBuffer.add(new BufferedOutputToken(token.state, token.term.toString(), lastNode, endNode));
|
||||
}
|
||||
|
||||
/*
|
||||
System.out.println(" after buffer: " + outputBuffer.size() + " tokens:");
|
||||
for(BufferedOutputToken token : outputBuffer) {
|
||||
System.out.println(" tok: " + token.term + " startNode=" + token.startNode + " endNode=" + token.endNode);
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
/**
|
||||
* Buffers the current input token into lookahead buffer.
|
||||
*/
|
||||
private void capture() {
|
||||
assert liveToken;
|
||||
liveToken = false;
|
||||
BufferedInputToken token = lookahead.get(lookaheadNextWrite);
|
||||
lookaheadNextWrite++;
|
||||
|
||||
token.state = captureState();
|
||||
token.startOffset = offsetAtt.startOffset();
|
||||
token.endOffset = offsetAtt.endOffset();
|
||||
assert token.term.length() == 0;
|
||||
token.term.append(termAtt);
|
||||
|
||||
captureCount++;
|
||||
maxLookaheadUsed = Math.max(maxLookaheadUsed, lookahead.getBufferSize());
|
||||
//System.out.println(" maxLookaheadUsed=" + maxLookaheadUsed);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void reset() throws IOException {
|
||||
super.reset();
|
||||
lookahead.reset();
|
||||
lookaheadNextWrite = 0;
|
||||
lookaheadNextRead = 0;
|
||||
captureCount = 0;
|
||||
lastNodeOut = -1;
|
||||
nextNodeOut = 0;
|
||||
matchStartOffset = -1;
|
||||
matchEndOffset = -1;
|
||||
finished = false;
|
||||
liveToken = false;
|
||||
outputBuffer.clear();
|
||||
maxLookaheadUsed = 0;
|
||||
//System.out.println("S: reset");
|
||||
}
|
||||
|
||||
// for testing
|
||||
int getCaptureCount() {
|
||||
return captureCount;
|
||||
}
|
||||
|
||||
// for testing
|
||||
int getMaxLookaheadUsed() {
|
||||
return maxLookaheadUsed;
|
||||
}
|
||||
}
|
|
@@ -1,115 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.search;
|
||||
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A query that wraps multiple sub-queries generated from a graph token stream.
|
||||
*/
|
||||
public final class GraphQuery extends Query {
|
||||
private final Query[] queries;
|
||||
private final boolean hasBoolean;
|
||||
|
||||
/**
|
||||
* Constructor sets the queries and checks if any of them are
|
||||
* a boolean query.
|
||||
*
|
||||
* @param queries the non-null array of queries
|
||||
*/
|
||||
public GraphQuery(Query... queries) {
|
||||
this.queries = Objects.requireNonNull(queries).clone();
|
||||
for (Query query : queries) {
|
||||
if (query instanceof BooleanQuery) {
|
||||
hasBoolean = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
hasBoolean = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the queries
|
||||
*
|
||||
* @return unmodifiable list of Query
|
||||
*/
|
||||
public List<Query> getQueries() {
|
||||
return Collections.unmodifiableList(Arrays.asList(queries));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether at least one of the wrapped queries is a boolean query.
|
||||
*
|
||||
* @return true if there is a boolean, false if not
|
||||
*/
|
||||
public boolean hasBoolean() {
|
||||
return hasBoolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Rewrites to a single query or a boolean query where each query is a SHOULD clause.
|
||||
*/
|
||||
@Override
|
||||
public Query rewrite(IndexReader reader) throws IOException {
|
||||
if (queries.length == 0) {
|
||||
return new BooleanQuery.Builder().build();
|
||||
}
|
||||
|
||||
if (queries.length == 1) {
|
||||
return queries[0];
|
||||
}
|
||||
|
||||
BooleanQuery.Builder q = new BooleanQuery.Builder();
|
||||
q.setDisableCoord(true);
|
||||
for (Query clause : queries) {
|
||||
q.add(clause, BooleanClause.Occur.SHOULD);
|
||||
}
|
||||
|
||||
return q.build();
|
||||
}
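To make the rewrite cases concrete, a small illustrative sketch; the field name, terms, and the reader variable are assumptions, not part of this change:

Query a = new TermQuery(new Term("body", "wifi"));
Query b = new PhraseQuery("body", "wi", "fi");

new GraphQuery().rewrite(reader);       // no queries   -> empty BooleanQuery (matches nothing)
new GraphQuery(a).rewrite(reader);      // one query    -> returns 'a' unchanged
new GraphQuery(a, b).rewrite(reader);   // many queries -> BooleanQuery of SHOULD clauses, coord disabled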
|
||||
|
||||
@Override
|
||||
public String toString(String field) {
|
||||
StringBuilder builder = new StringBuilder("Graph(");
|
||||
for (int i = 0; i < queries.length; i++) {
|
||||
if (i != 0) {
|
||||
builder.append(", ");
|
||||
}
|
||||
builder.append(Objects.toString(queries[i]));
|
||||
}
|
||||
builder.append(")");
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
return sameClassAs(other) &&
|
||||
Arrays.equals(queries, ((GraphQuery) other).queries);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 31 * classHash() + Arrays.hashCode(queries);
|
||||
}
|
||||
}
|
|
@@ -28,8 +28,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Comparator;
|
||||
|
||||
public class Version {
|
||||
public class Version implements Comparable<Version> {
|
||||
/*
|
||||
* The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is the alpha/beta/rc indicator. AA
* values below 25 are for alpha builds (since 5.0), above 25 and below 50 are beta builds, and below 99 are RC builds, with 99
|
||||
|
@@ -102,6 +103,8 @@ public class Version {
public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
|
||||
public static final int V_5_2_0_ID_UNRELEASED = 5020099;
|
||||
public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
|
||||
public static final int V_5_3_0_ID_UNRELEASED = 5030099;
|
||||
public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
|
||||
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
|
||||
public static final Version V_6_0_0_alpha1_UNRELEASED =
|
||||
new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
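As a worked example of the XXYYZZAA scheme from the class comment, this is how one of the ids above decomposes; the arithmetic is only an illustration of the encoding, not code from this change:

int id = V_5_3_0_ID_UNRELEASED;      // 5030099 -> XX=05, YY=03, ZZ=00, AA=99
int major    = id / 1000000;         // 5
int minor    = (id / 10000) % 100;   // 3
int revision = (id / 100) % 100;     // 0
int build    = id % 100;             // 99, the alpha/beta/rc indicator described above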
|
||||
|
@@ -122,6 +125,8 @@ public class Version {
switch (id) {
|
||||
case V_6_0_0_alpha1_ID_UNRELEASED:
|
||||
return V_6_0_0_alpha1_UNRELEASED;
|
||||
case V_5_3_0_ID_UNRELEASED:
|
||||
return V_5_3_0_UNRELEASED;
|
||||
case V_5_2_0_ID_UNRELEASED:
|
||||
return V_5_2_0_UNRELEASED;
|
||||
case V_5_1_2_ID_UNRELEASED:
|
||||
|
@@ -310,6 +315,11 @@ public class Version {
return version.id >= id;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(Version other) {
|
||||
return Integer.compare(this.id, other.id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the minimum compatible version based on the current
|
||||
* version. Ie a node needs to have at least the return version in order
|
||||
|
|
|
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
@@ -46,30 +47,42 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
PARSER.declareString(ClusterAllocationExplainRequest::setIndex, new ParseField("index"));
|
||||
PARSER.declareInt(ClusterAllocationExplainRequest::setShard, new ParseField("shard"));
|
||||
PARSER.declareBoolean(ClusterAllocationExplainRequest::setPrimary, new ParseField("primary"));
|
||||
PARSER.declareString(ClusterAllocationExplainRequest::setCurrentNode, new ParseField("current_node"));
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private String index;
|
||||
@Nullable
|
||||
private Integer shard;
|
||||
@Nullable
|
||||
private Boolean primary;
|
||||
@Nullable
|
||||
private String currentNode;
|
||||
private boolean includeYesDecisions = false;
|
||||
private boolean includeDiskInfo = false;
|
||||
|
||||
/** Explain the first unassigned shard */
|
||||
/**
|
||||
* Create a new allocation explain request to explain any unassigned shard in the cluster.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest() {
|
||||
this.index = null;
|
||||
this.shard = null;
|
||||
this.primary = null;
|
||||
this.currentNode = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new allocation explain request. If {@code primary} is false, the first unassigned replica
|
||||
* will be picked for explanation. If no replicas are unassigned, the first assigned replica will
|
||||
* be explained.
|
||||
*
|
||||
* Package private for testing.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest(String index, int shard, boolean primary) {
|
||||
ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) {
|
||||
this.index = index;
|
||||
this.shard = shard;
|
||||
this.primary = primary;
|
||||
this.currentNode = currentNode;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -93,54 +106,103 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
* Returns {@code true} iff the first unassigned shard is to be used
|
||||
*/
|
||||
public boolean useAnyUnassignedShard() {
|
||||
return this.index == null && this.shard == null && this.primary == null;
|
||||
return this.index == null && this.shard == null && this.primary == null && this.currentNode == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the index name of the shard to explain.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setIndex(String index) {
|
||||
this.index = index;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the index name of the shard to explain, or {@code null} to use any unassigned shard (see {@link #useAnyUnassignedShard()}).
|
||||
*/
|
||||
@Nullable
|
||||
public String getIndex() {
|
||||
return this.index;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the shard id of the shard to explain.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setShard(Integer shard) {
|
||||
this.shard = shard;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the shard id of the shard to explain, or {@code null} to use any unassigned shard (see {@link #useAnyUnassignedShard()}).
|
||||
*/
|
||||
@Nullable
|
||||
public Integer getShard() {
|
||||
return this.shard;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets whether to explain the allocation of the primary shard or a replica shard copy
|
||||
* for the shard id (see {@link #getShard()}).
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setPrimary(Boolean primary) {
|
||||
this.primary = primary;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns {@code true} if explaining the primary shard for the shard id (see {@link #getShard()}),
|
||||
* {@code false} if explaining a replica shard copy for the shard id, or {@code null} to use any
|
||||
* unassigned shard (see {@link #useAnyUnassignedShard()}).
|
||||
*/
|
||||
@Nullable
|
||||
public Boolean isPrimary() {
|
||||
return this.primary;
|
||||
}
|
||||
|
||||
/**
|
||||
* Requests the explain API to explain an already assigned replica shard currently allocated to
|
||||
* the given node.
|
||||
*/
|
||||
public ClusterAllocationExplainRequest setCurrentNode(String currentNodeId) {
|
||||
this.currentNode = currentNodeId;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the node holding the replica shard to be explained. Returns {@code null} if any replica shard
|
||||
* can be explained.
|
||||
*/
|
||||
@Nullable
|
||||
public String getCurrentNode() {
|
||||
return currentNode;
|
||||
}
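Putting the setters together, a hedged usage sketch of this request; the index and node names are made up, and execution would go through the cluster admin client's allocation-explain action:

ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest()
    .setIndex("my-index")       // hypothetical index
    .setShard(0)
    .setPrimary(false)          // explain a replica copy of shard 0
    .setCurrentNode("node-1");  // the node currently holding that replica

// With no setters called, useAnyUnassignedShard() is true and the API explains the first unassigned shard.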
|
||||
|
||||
/**
|
||||
* Set to {@code true} to include yes decisions for a particular node.
|
||||
*/
|
||||
public void includeYesDecisions(boolean includeYesDecisions) {
|
||||
this.includeYesDecisions = includeYesDecisions;
|
||||
}
|
||||
|
||||
/** Returns true if all decisions should be included. Otherwise only "NO" and "THROTTLE" decisions are returned */
|
||||
/**
|
||||
* Returns {@code true} if yes decisions should be included. Otherwise only "no" and "throttle"
|
||||
* decisions are returned.
|
||||
*/
|
||||
public boolean includeYesDecisions() {
|
||||
return this.includeYesDecisions;
|
||||
}
|
||||
|
||||
/** {@code true} to include information about the gathered disk information of nodes in the cluster */
|
||||
/**
|
||||
* Set to {@code true} to include information about the gathered disk information of nodes in the cluster.
|
||||
*/
|
||||
public void includeDiskInfo(boolean includeDiskInfo) {
|
||||
this.includeDiskInfo = includeDiskInfo;
|
||||
}
|
||||
|
||||
/** Returns true if information about disk usage and shard sizes should also be returned */
|
||||
/**
|
||||
* Returns {@code true} if information about disk usage and shard sizes should also be returned.
|
||||
*/
|
||||
public boolean includeDiskInfo() {
|
||||
return this.includeDiskInfo;
|
||||
}
|
||||
|
@@ -154,6 +216,9 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
sb.append("index=").append(index);
|
||||
sb.append(",shard=").append(shard);
|
||||
sb.append(",primary?=").append(primary);
|
||||
if (currentNode != null) {
|
||||
sb.append(",currentNode=").append(currentNode);
|
||||
}
|
||||
}
|
||||
sb.append(",includeYesDecisions?=").append(includeYesDecisions);
|
||||
return sb.toString();
|
||||
|
@@ -170,21 +235,32 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
checkVersion(in.getVersion());
|
||||
super.readFrom(in);
|
||||
this.index = in.readOptionalString();
|
||||
this.shard = in.readOptionalVInt();
|
||||
this.primary = in.readOptionalBoolean();
|
||||
this.currentNode = in.readOptionalString();
|
||||
this.includeYesDecisions = in.readBoolean();
|
||||
this.includeDiskInfo = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
checkVersion(out.getVersion());
|
||||
super.writeTo(out);
|
||||
out.writeOptionalString(index);
|
||||
out.writeOptionalVInt(shard);
|
||||
out.writeOptionalBoolean(primary);
|
||||
out.writeOptionalString(currentNode);
|
||||
out.writeBoolean(includeYesDecisions);
|
||||
out.writeBoolean(includeDiskInfo);
|
||||
}
|
||||
|
||||
private void checkVersion(Version version) {
|
||||
if (version.before(Version.V_5_2_0_UNRELEASED)) {
|
||||
throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0_UNRELEASED +
|
||||
" nodes, node version [" + version + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -19,7 +19,6 @@
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
|
@@ -65,6 +64,15 @@ public class ClusterAllocationExplainRequestBuilder
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Requests the explain API to explain an already assigned replica shard currently allocated to
|
||||
* the given node.
|
||||
*/
|
||||
public ClusterAllocationExplainRequestBuilder setCurrentNode(String currentNode) {
|
||||
request.setCurrentNode(currentNode);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Signal that the first unassigned shard should be used
|
||||
*/
|
||||
|
|
|
@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
|
|
|
@@ -21,285 +21,184 @@ package org.elasticsearch.action.admin.cluster.allocation;
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent;
|
||||
|
||||
/**
|
||||
* A {@code ClusterAllocationExplanation} is an explanation of why a shard may or may not be allocated to nodes. It also includes weights
|
||||
* for where the shard is likely to be assigned. It is an immutable class
|
||||
* A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned,
|
||||
* or if it is not unassigned, then which nodes it could possibly be relocated to.
|
||||
* It is an immutable class.
|
||||
*/
|
||||
public final class ClusterAllocationExplanation implements ToXContent, Writeable {
|
||||
|
||||
private final ShardId shard;
|
||||
private final boolean primary;
|
||||
private final boolean hasPendingAsyncFetch;
|
||||
private final String assignedNodeId;
|
||||
private final UnassignedInfo unassignedInfo;
|
||||
private final long allocationDelayMillis;
|
||||
private final long remainingDelayMillis;
|
||||
private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
|
||||
private final ShardRouting shardRouting;
|
||||
private final DiscoveryNode currentNode;
|
||||
private final DiscoveryNode relocationTargetNode;
|
||||
private final ClusterInfo clusterInfo;
|
||||
private final ShardAllocationDecision shardAllocationDecision;
|
||||
|
||||
public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
|
||||
long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
|
||||
Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
|
||||
this.shard = shard;
|
||||
this.primary = primary;
|
||||
this.hasPendingAsyncFetch = hasPendingAsyncFetch;
|
||||
this.assignedNodeId = assignedNodeId;
|
||||
this.unassignedInfo = unassignedInfo;
|
||||
this.allocationDelayMillis = allocationDelayMillis;
|
||||
this.remainingDelayMillis = remainingDelayMillis;
|
||||
this.nodeExplanations = nodeExplanations;
|
||||
public ClusterAllocationExplanation(ShardRouting shardRouting, @Nullable DiscoveryNode currentNode,
|
||||
@Nullable DiscoveryNode relocationTargetNode, @Nullable ClusterInfo clusterInfo,
|
||||
ShardAllocationDecision shardAllocationDecision) {
|
||||
this.shardRouting = shardRouting;
|
||||
this.currentNode = currentNode;
|
||||
this.relocationTargetNode = relocationTargetNode;
|
||||
this.clusterInfo = clusterInfo;
|
||||
this.shardAllocationDecision = shardAllocationDecision;
|
||||
}
|
||||
|
||||
public ClusterAllocationExplanation(StreamInput in) throws IOException {
|
||||
this.shard = ShardId.readShardId(in);
|
||||
this.primary = in.readBoolean();
|
||||
this.hasPendingAsyncFetch = in.readBoolean();
|
||||
this.assignedNodeId = in.readOptionalString();
|
||||
this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
|
||||
this.allocationDelayMillis = in.readVLong();
|
||||
this.remainingDelayMillis = in.readVLong();
|
||||
|
||||
int mapSize = in.readVInt();
|
||||
Map<DiscoveryNode, NodeExplanation> nodeToExplanation = new HashMap<>(mapSize);
|
||||
for (int i = 0; i < mapSize; i++) {
|
||||
NodeExplanation nodeExplanation = new NodeExplanation(in);
|
||||
nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
|
||||
}
|
||||
this.nodeExplanations = nodeToExplanation;
|
||||
if (in.readBoolean()) {
|
||||
this.clusterInfo = new ClusterInfo(in);
|
||||
} else {
|
||||
this.clusterInfo = null;
|
||||
}
|
||||
this.shardRouting = new ShardRouting(in);
|
||||
this.currentNode = in.readOptionalWriteable(DiscoveryNode::new);
|
||||
this.relocationTargetNode = in.readOptionalWriteable(DiscoveryNode::new);
|
||||
this.clusterInfo = in.readOptionalWriteable(ClusterInfo::new);
|
||||
this.shardAllocationDecision = new ShardAllocationDecision(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
this.getShard().writeTo(out);
|
||||
out.writeBoolean(this.isPrimary());
|
||||
out.writeBoolean(this.isStillFetchingShardData());
|
||||
out.writeOptionalString(this.getAssignedNodeId());
|
||||
out.writeOptionalWriteable(this.getUnassignedInfo());
|
||||
out.writeVLong(allocationDelayMillis);
|
||||
out.writeVLong(remainingDelayMillis);
|
||||
|
||||
out.writeVInt(this.nodeExplanations.size());
|
||||
for (NodeExplanation explanation : this.nodeExplanations.values()) {
|
||||
explanation.writeTo(out);
|
||||
}
|
||||
if (this.clusterInfo != null) {
|
||||
out.writeBoolean(true);
|
||||
this.clusterInfo.writeTo(out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
shardRouting.writeTo(out);
|
||||
out.writeOptionalWriteable(currentNode);
|
||||
out.writeOptionalWriteable(relocationTargetNode);
|
||||
out.writeOptionalWriteable(clusterInfo);
|
||||
shardAllocationDecision.writeTo(out);
|
||||
}
|
||||
|
||||
/** Return the shard that the explanation is about */
|
||||
/**
|
||||
* Returns the shard that the explanation is about.
|
||||
*/
|
||||
public ShardId getShard() {
|
||||
return this.shard;
|
||||
return shardRouting.shardId();
|
||||
}
|
||||
|
||||
/** Return true if the explained shard is primary, false otherwise */
|
||||
/**
|
||||
* Returns {@code true} if the explained shard is primary, {@code false} otherwise.
|
||||
*/
|
||||
public boolean isPrimary() {
|
||||
return this.primary;
|
||||
return shardRouting.primary();
|
||||
}
|
||||
|
||||
/** Return true if shard data is still being fetched for the allocation */
|
||||
public boolean isStillFetchingShardData() {
|
||||
return this.hasPendingAsyncFetch;
|
||||
/**
|
||||
* Returns the current {@link ShardRoutingState} of the shard.
|
||||
*/
|
||||
public ShardRoutingState getShardState() {
|
||||
return shardRouting.state();
|
||||
}
|
||||
|
||||
/** Return true if the shard is assigned to a node */
|
||||
public boolean isAssigned() {
|
||||
return this.assignedNodeId != null;
|
||||
}
|
||||
|
||||
/** Return the assigned node id or null if not assigned */
|
||||
/**
|
||||
* Returns the currently assigned node, or {@code null} if the shard is unassigned.
|
||||
*/
|
||||
@Nullable
|
||||
public String getAssignedNodeId() {
|
||||
return this.assignedNodeId;
|
||||
public DiscoveryNode getCurrentNode() {
|
||||
return currentNode;
|
||||
}
|
||||
|
||||
/** Return the unassigned info for the shard or null if the shard is assigned */
|
||||
/**
|
||||
* Returns the relocating target node, or {@code null} if the shard is not in the {@link ShardRoutingState#RELOCATING} state.
|
||||
*/
|
||||
@Nullable
|
||||
public DiscoveryNode getRelocationTargetNode() {
|
||||
return relocationTargetNode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the unassigned info for the shard, or {@code null} if the shard is active.
|
||||
*/
|
||||
@Nullable
|
||||
public UnassignedInfo getUnassignedInfo() {
|
||||
return this.unassignedInfo;
|
||||
return shardRouting.unassignedInfo();
|
||||
}
|
||||
|
||||
/** Return the configured delay before the shard can be allocated in milliseconds */
|
||||
public long getAllocationDelayMillis() {
|
||||
return this.allocationDelayMillis;
|
||||
}
|
||||
|
||||
/** Return the remaining allocation delay for this shard in milliseconds */
|
||||
public long getRemainingDelayMillis() {
|
||||
return this.remainingDelayMillis;
|
||||
}
|
||||
|
||||
/** Return a map of node to the explanation for that node */
|
||||
public Map<DiscoveryNode, NodeExplanation> getNodeExplanations() {
|
||||
return this.nodeExplanations;
|
||||
}
|
||||
|
||||
/** Return the cluster disk info for the cluster or null if none available */
|
||||
/**
|
||||
* Returns the cluster disk info for the cluster, or {@code null} if none available.
|
||||
*/
|
||||
@Nullable
|
||||
public ClusterInfo getClusterInfo() {
|
||||
return this.clusterInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the shard allocation decision for attempting to assign or move the shard.
|
||||
*/
|
||||
public ShardAllocationDecision getShardAllocationDecision() {
|
||||
return shardAllocationDecision;
|
||||
}
|
||||
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(); {
|
||||
builder.startObject("shard"); {
|
||||
builder.field("index", shard.getIndexName());
|
||||
builder.field("index_uuid", shard.getIndex().getUUID());
|
||||
builder.field("id", shard.getId());
|
||||
builder.field("primary", primary);
|
||||
builder.field("index", shardRouting.getIndexName());
|
||||
builder.field("shard", shardRouting.getId());
|
||||
builder.field("primary", shardRouting.primary());
|
||||
builder.field("current_state", shardRouting.state().toString().toLowerCase(Locale.ROOT));
|
||||
if (shardRouting.unassignedInfo() != null) {
|
||||
unassignedInfoToXContent(shardRouting.unassignedInfo(), builder);
|
||||
}
|
||||
builder.endObject(); // end shard
|
||||
builder.field("assigned", this.assignedNodeId != null);
|
||||
// If assigned, show the node id of the node it's assigned to
|
||||
if (assignedNodeId != null) {
|
||||
builder.field("assigned_node_id", this.assignedNodeId);
|
||||
}
|
||||
builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
|
||||
// If we have unassigned info, show that
|
||||
if (unassignedInfo != null) {
|
||||
unassignedInfo.toXContent(builder, params);
|
||||
builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
|
||||
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
|
||||
}
|
||||
builder.startObject("nodes"); {
|
||||
for (NodeExplanation explanation : nodeExplanations.values()) {
|
||||
explanation.toXContent(builder, params);
|
||||
if (currentNode != null) {
|
||||
builder.startObject("current_node");
|
||||
{
|
||||
discoveryNodeToXContent(currentNode, true, builder);
|
||||
if (shardAllocationDecision.getMoveDecision().isDecisionTaken()
|
||||
&& shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) {
|
||||
builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking());
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end nodes
|
||||
builder.endObject();
|
||||
}
|
||||
if (this.clusterInfo != null) {
|
||||
builder.startObject("cluster_info"); {
|
||||
this.clusterInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endObject(); // end "cluster_info"
|
||||
}
|
||||
if (shardAllocationDecision.isDecisionTaken()) {
|
||||
shardAllocationDecision.toXContent(builder, params);
|
||||
} else {
|
||||
String explanation;
|
||||
if (shardRouting.state() == ShardRoutingState.RELOCATING) {
|
||||
explanation = "the shard is in the process of relocating from node [" + currentNode.getName() + "] " +
|
||||
"to node [" + relocationTargetNode.getName() + "], wait until relocation has completed";
|
||||
} else {
|
||||
assert shardRouting.state() == ShardRoutingState.INITIALIZING;
|
||||
explanation = "the shard is in the process of initializing on node [" + currentNode.getName() + "], " +
|
||||
"wait until initialization has completed";
|
||||
}
|
||||
builder.field("explanation", explanation);
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end wrapping object
|
||||
return builder;
|
||||
}
|
||||
|
||||
/** An Enum representing the final decision for a shard allocation on a node */
|
||||
public enum FinalDecision {
|
||||
// Yes, the shard can be assigned
|
||||
YES((byte) 0),
|
||||
// No, the shard cannot be assigned
|
||||
NO((byte) 1),
|
||||
// The shard is already assigned to this node
|
||||
ALREADY_ASSIGNED((byte) 2);
|
||||
private XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder)
|
||||
throws IOException {
|
||||
|
||||
private final byte id;
|
||||
|
||||
FinalDecision (byte id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
private static FinalDecision fromId(byte id) {
|
||||
switch (id) {
|
||||
case 0: return YES;
|
||||
case 1: return NO;
|
||||
case 2: return ALREADY_ASSIGNED;
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
switch (id) {
|
||||
case 0: return "YES";
|
||||
case 1: return "NO";
|
||||
case 2: return "ALREADY_ASSIGNED";
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
static FinalDecision readFrom(StreamInput in) throws IOException {
|
||||
return fromId(in.readByte());
|
||||
}
|
||||
|
||||
void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeByte(id);
|
||||
}
|
||||
}
|
||||
|
||||
/** An Enum representing the state of the shard store's copy of the data on a node */
|
||||
public enum StoreCopy {
|
||||
// No data for this shard is on the node
|
||||
NONE((byte) 0),
|
||||
// A copy of the data is available on this node
|
||||
AVAILABLE((byte) 1),
|
||||
// The copy of the data on the node is corrupt
|
||||
CORRUPT((byte) 2),
|
||||
// There was an error reading this node's copy of the data
|
||||
IO_ERROR((byte) 3),
|
||||
// The copy of the data on the node is stale
|
||||
STALE((byte) 4),
|
||||
// It's unknown what the copy of the data is
|
||||
UNKNOWN((byte) 5);
|
||||
|
||||
private final byte id;
|
||||
|
||||
StoreCopy (byte id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
private static StoreCopy fromId(byte id) {
|
||||
switch (id) {
|
||||
case 0: return NONE;
|
||||
case 1: return AVAILABLE;
|
||||
case 2: return CORRUPT;
|
||||
case 3: return IO_ERROR;
|
||||
case 4: return STALE;
|
||||
case 5: return UNKNOWN;
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
switch (id) {
|
||||
case 0: return "NONE";
|
||||
case 1: return "AVAILABLE";
|
||||
case 2: return "CORRUPT";
|
||||
case 3: return "IO_ERROR";
|
||||
case 4: return "STALE";
|
||||
case 5: return "UNKNOWN";
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
|
||||
}
|
||||
}
|
||||
|
||||
static StoreCopy readFrom(StreamInput in) throws IOException {
|
||||
return fromId(in.readByte());
|
||||
}
|
||||
|
||||
void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeByte(id);
|
||||
builder.startObject("unassigned_info");
|
||||
builder.field("reason", unassignedInfo.getReason());
|
||||
builder.field("at", UnassignedInfo.DATE_TIME_FORMATTER.printer().print(unassignedInfo.getUnassignedTimeInMillis()));
|
||||
if (unassignedInfo.getNumFailedAllocations() > 0) {
|
||||
builder.field("failed_allocation_attempts", unassignedInfo.getNumFailedAllocations());
|
||||
}
|
||||
String details = unassignedInfo.getDetails();
|
||||
if (details != null) {
|
||||
builder.field("details", details);
|
||||
}
|
||||
builder.field("last_allocation_status", AllocationDecision.fromAllocationStatus(unassignedInfo.getLastAllocationStatus()));
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,147 +0,0 @@
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
/** The cluster allocation explanation for a single node */
|
||||
public class NodeExplanation implements Writeable, ToXContent {
|
||||
private final DiscoveryNode node;
|
||||
private final Decision nodeDecision;
|
||||
private final Float nodeWeight;
|
||||
private final IndicesShardStoresResponse.StoreStatus storeStatus;
|
||||
private final ClusterAllocationExplanation.FinalDecision finalDecision;
|
||||
private final ClusterAllocationExplanation.StoreCopy storeCopy;
|
||||
private final String finalExplanation;
|
||||
|
||||
public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
|
||||
@Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
|
||||
final ClusterAllocationExplanation.FinalDecision finalDecision,
|
||||
final String finalExplanation,
|
||||
final ClusterAllocationExplanation.StoreCopy storeCopy) {
|
||||
this.node = node;
|
||||
this.nodeDecision = nodeDecision;
|
||||
this.nodeWeight = nodeWeight;
|
||||
this.storeStatus = storeStatus;
|
||||
this.finalDecision = finalDecision;
|
||||
this.finalExplanation = finalExplanation;
|
||||
this.storeCopy = storeCopy;
|
||||
}
|
||||
|
||||
public NodeExplanation(StreamInput in) throws IOException {
|
||||
this.node = new DiscoveryNode(in);
|
||||
this.nodeDecision = Decision.readFrom(in);
|
||||
this.nodeWeight = in.readFloat();
|
||||
if (in.readBoolean()) {
|
||||
this.storeStatus = IndicesShardStoresResponse.StoreStatus.readStoreStatus(in);
|
||||
} else {
|
||||
this.storeStatus = null;
|
||||
}
|
||||
this.finalDecision = ClusterAllocationExplanation.FinalDecision.readFrom(in);
|
||||
this.finalExplanation = in.readString();
|
||||
this.storeCopy = ClusterAllocationExplanation.StoreCopy.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
node.writeTo(out);
|
||||
nodeDecision.writeTo(out);
|
||||
out.writeFloat(nodeWeight);
|
||||
if (storeStatus == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
storeStatus.writeTo(out);
|
||||
}
|
||||
finalDecision.writeTo(out);
|
||||
out.writeString(finalExplanation);
|
||||
storeCopy.writeTo(out);
|
||||
}
|
||||
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(node.getId()); {
|
||||
builder.field("node_name", node.getName());
|
||||
builder.startObject("node_attributes"); {
|
||||
for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
|
||||
builder.field(attrEntry.getKey(), attrEntry.getValue());
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end attributes
|
||||
builder.startObject("store"); {
|
||||
builder.field("shard_copy", storeCopy.toString());
|
||||
if (storeStatus != null) {
|
||||
final Throwable storeErr = storeStatus.getStoreException();
|
||||
if (storeErr != null) {
|
||||
builder.field("store_exception", ExceptionsHelper.detailedMessage(storeErr));
|
||||
}
|
||||
}
|
||||
}
|
||||
builder.endObject(); // end store
|
||||
builder.field("final_decision", finalDecision.toString());
|
||||
builder.field("final_explanation", finalExplanation);
|
||||
builder.field("weight", nodeWeight);
|
||||
builder.startArray("decisions");
|
||||
nodeDecision.toXContent(builder, params);
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject(); // end node <uuid>
|
||||
return builder;
|
||||
}
|
||||
|
||||
public DiscoveryNode getNode() {
|
||||
return this.node;
|
||||
}
|
||||
|
||||
public Decision getDecision() {
|
||||
return this.nodeDecision;
|
||||
}
|
||||
|
||||
public Float getWeight() {
|
||||
return this.nodeWeight;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public IndicesShardStoresResponse.StoreStatus getStoreStatus() {
|
||||
return this.storeStatus;
|
||||
}
|
||||
|
||||
public ClusterAllocationExplanation.FinalDecision getFinalDecision() {
|
||||
return this.finalDecision;
|
||||
}
|
||||
|
||||
public String getFinalExplanation() {
|
||||
return this.finalExplanation;
|
||||
}
|
||||
|
||||
public ClusterAllocationExplanation.StoreCopy getStoreCopy() {
|
||||
return this.storeCopy;
|
||||
}
|
||||
}
|
|
@ -19,13 +19,7 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.allocation;
|
||||
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
|
@ -33,34 +27,25 @@ import org.elasticsearch.cluster.ClusterInfoService;
|
|||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode;
|
||||
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.gateway.GatewayAllocator;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
|
||||
|
||||
/**
|
||||
* The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
|
||||
|
@ -72,7 +57,6 @@ public class TransportClusterAllocationExplainAction
|
|||
private final ClusterInfoService clusterInfoService;
|
||||
private final AllocationDeciders allocationDeciders;
|
||||
private final ShardsAllocator shardAllocator;
|
||||
private final TransportIndicesShardStoresAction shardStoresAction;
|
||||
private final GatewayAllocator gatewayAllocator;
|
||||
|
||||
@Inject
|
||||
|
@ -80,14 +64,12 @@ public class TransportClusterAllocationExplainAction
|
|||
ThreadPool threadPool, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
|
||||
ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction,
|
||||
GatewayAllocator gatewayAllocator) {
|
||||
ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) {
|
||||
super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
|
||||
indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
|
||||
this.clusterInfoService = clusterInfoService;
|
||||
this.allocationDeciders = allocationDeciders;
|
||||
this.shardAllocator = shardAllocator;
|
||||
this.shardStoresAction = shardStoresAction;
|
||||
this.gatewayAllocator = gatewayAllocator;
|
||||
}
|
||||
|
||||
|
@ -106,172 +88,6 @@ public class TransportClusterAllocationExplainAction
|
|||
return new ClusterAllocationExplainResponse();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is not true,
|
||||
* only non-YES (NO and THROTTLE) decisions are returned.
|
||||
*/
|
||||
public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) {
|
||||
Decision d = allocation.deciders().canAllocate(shard, node, allocation);
|
||||
if (includeYesDecisions) {
|
||||
return d;
|
||||
} else {
|
||||
Decision.Multi nonYesDecisions = new Decision.Multi();
|
||||
List<Decision> decisions = d.getDecisions();
|
||||
for (Decision decision : decisions) {
|
||||
if (decision.type() != Decision.Type.YES) {
|
||||
nonYesDecisions.add(decision);
|
||||
}
|
||||
}
|
||||
return nonYesDecisions;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct a {@code WeightedDecision} object for the given shard given all the metadata. This also attempts to construct the human
|
||||
* readable FinalDecision and final explanation as part of the explanation.
|
||||
*/
|
||||
public static NodeExplanation calculateNodeExplanation(ShardRouting shard,
|
||||
IndexMetaData indexMetaData,
|
||||
DiscoveryNode node,
|
||||
Decision nodeDecision,
|
||||
Float nodeWeight,
|
||||
IndicesShardStoresResponse.StoreStatus storeStatus,
|
||||
String assignedNodeId,
|
||||
Set<String> activeAllocationIds,
|
||||
boolean hasPendingAsyncFetch) {
|
||||
final ClusterAllocationExplanation.FinalDecision finalDecision;
|
||||
final ClusterAllocationExplanation.StoreCopy storeCopy;
|
||||
final String finalExplanation;
|
||||
|
||||
if (storeStatus == null) {
|
||||
// No copies of the data
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
|
||||
} else {
|
||||
final Exception storeErr = storeStatus.getStoreException();
|
||||
if (storeErr != null) {
|
||||
if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;
|
||||
} else {
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.IO_ERROR;
|
||||
}
|
||||
} else if (activeAllocationIds.isEmpty()) {
|
||||
// The ids are only empty if dealing with a legacy index
|
||||
// TODO: fetch the shard state versions and display here?
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.UNKNOWN;
|
||||
} else if (activeAllocationIds.contains(storeStatus.getAllocationId())) {
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.AVAILABLE;
|
||||
} else {
|
||||
// Otherwise, this is a stale copy of the data (allocation ids don't match)
|
||||
storeCopy = ClusterAllocationExplanation.StoreCopy.STALE;
|
||||
}
|
||||
}
|
||||
|
||||
if (node.getId().equals(assignedNodeId)) {
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
|
||||
finalExplanation = "the shard is already assigned to this node";
|
||||
} else if (shard.unassigned() && shard.primary() == false &&
|
||||
shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) {
|
||||
finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
|
||||
" decision";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.unassigned() && shard.primary() == false &&
|
||||
shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) {
|
||||
finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() &&
|
||||
(shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE ||
|
||||
shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT)
|
||||
&& hasPendingAsyncFetch) {
|
||||
finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
|
||||
finalExplanation = "the copy of the shard is stale, allocation ids do not match";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
|
||||
finalExplanation = "there is no copy of the shard available";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
|
||||
finalExplanation = "the copy of the shard is corrupt";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
|
||||
finalExplanation = "the copy of the shard cannot be read";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else {
|
||||
if (nodeDecision.type() == Decision.Type.NO) {
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
|
||||
} else {
|
||||
// TODO: handle throttling decision better here
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
|
||||
if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
|
||||
finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
|
||||
} else {
|
||||
finalExplanation = "the shard can be assigned";
|
||||
}
|
||||
}
|
||||
}
|
||||
return new NodeExplanation(node, nodeDecision, nodeWeight, storeStatus, finalDecision, finalExplanation, storeCopy);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code
|
||||
* includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions.
|
||||
*/
|
||||
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
|
||||
boolean includeYesDecisions, ShardsAllocator shardAllocator,
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStores,
|
||||
GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
|
||||
// don't short circuit deciders, we want a full explanation
|
||||
allocation.debugDecision(true);
|
||||
// get the existing unassigned info if available
|
||||
UnassignedInfo ui = shard.unassignedInfo();
|
||||
|
||||
Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
|
||||
for (RoutingNode node : routingNodes) {
|
||||
DiscoveryNode discoNode = node.node();
|
||||
if (discoNode.isDataNode()) {
|
||||
Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);
|
||||
nodeToDecision.put(discoNode, d);
|
||||
}
|
||||
}
|
||||
long remainingDelayMillis = 0;
|
||||
final MetaData metadata = allocation.metaData();
|
||||
final IndexMetaData indexMetaData = metadata.index(shard.index());
|
||||
long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis();
|
||||
if (ui != null && ui.isDelayed()) {
|
||||
long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings());
|
||||
remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
|
||||
}
|
||||
|
||||
// Calculate weights for each of the nodes
|
||||
Map<DiscoveryNode, Float> weights = shardAllocator.weighShard(allocation, shard);
|
||||
|
||||
Map<DiscoveryNode, IndicesShardStoresResponse.StoreStatus> nodeToStatus = new HashMap<>(shardStores.size());
|
||||
for (IndicesShardStoresResponse.StoreStatus status : shardStores) {
|
||||
nodeToStatus.put(status.getNode(), status);
|
||||
}
|
||||
|
||||
Map<DiscoveryNode, NodeExplanation> explanations = new HashMap<>(shardStores.size());
|
||||
for (Map.Entry<DiscoveryNode, Decision> entry : nodeToDecision.entrySet()) {
|
||||
DiscoveryNode node = entry.getKey();
|
||||
Decision decision = entry.getValue();
|
||||
Float weight = weights.get(node);
|
||||
IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
|
||||
NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
|
||||
storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()),
|
||||
allocation.hasPendingAsyncFetch());
|
||||
explanations.put(node, nodeExplanation);
|
||||
}
|
||||
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
|
||||
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
|
||||
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
|
||||
final ActionListener<ClusterAllocationExplainResponse> listener) {
|
||||
|
@ -280,31 +96,96 @@ public class TransportClusterAllocationExplainAction
|
|||
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
|
||||
clusterInfo, System.nanoTime(), false);
|
||||
|
||||
ShardRouting shardRouting = findShardToExplain(request, allocation);
|
||||
logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
|
||||
|
||||
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation,
|
||||
request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator);
|
||||
listener.onResponse(new ClusterAllocationExplainResponse(cae));
|
||||
}
|
||||
|
||||
// public for testing
|
||||
public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation,
|
||||
ClusterInfo clusterInfo, boolean includeYesDecisions,
|
||||
GatewayAllocator gatewayAllocator, ShardsAllocator shardAllocator) {
|
||||
allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS);
|
||||
|
||||
ShardAllocationDecision shardDecision;
|
||||
if (shardRouting.initializing() || shardRouting.relocating()) {
|
||||
shardDecision = ShardAllocationDecision.NOT_TAKEN;
|
||||
} else {
|
||||
AllocateUnassignedDecision allocateDecision = shardRouting.unassigned() ?
|
||||
gatewayAllocator.decideUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN;
|
||||
if (allocateDecision.isDecisionTaken() == false) {
|
||||
shardDecision = shardAllocator.decideShardAllocation(shardRouting, allocation);
|
||||
} else {
|
||||
shardDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
|
||||
}
|
||||
}
|
||||
|
||||
return new ClusterAllocationExplanation(shardRouting,
|
||||
shardRouting.currentNodeId() != null ? allocation.nodes().get(shardRouting.currentNodeId()) : null,
|
||||
shardRouting.relocatingNodeId() != null ? allocation.nodes().get(shardRouting.relocatingNodeId()) : null,
|
||||
clusterInfo, shardDecision);
|
||||
}
|
||||
|
||||
// public for testing
|
||||
public static ShardRouting findShardToExplain(ClusterAllocationExplainRequest request, RoutingAllocation allocation) {
|
||||
ShardRouting foundShard = null;
|
||||
if (request.useAnyUnassignedShard()) {
|
||||
// If we can use any shard, just pick the first unassigned one (if there are any)
|
||||
RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator();
|
||||
RoutingNodes.UnassignedShards.UnassignedIterator ui = allocation.routingNodes().unassigned().iterator();
|
||||
if (ui.hasNext()) {
|
||||
foundShard = ui.next();
|
||||
}
|
||||
if (foundShard == null) {
|
||||
throw new IllegalStateException("unable to find any unassigned shards to explain [" + request + "]");
|
||||
}
|
||||
} else {
|
||||
String index = request.getIndex();
|
||||
int shard = request.getShard();
|
||||
if (request.isPrimary()) {
|
||||
// If we're looking for the primary shard, there's only one copy, so pick it directly
|
||||
foundShard = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
|
||||
if (request.getCurrentNode() != null) {
|
||||
DiscoveryNode primaryNode = allocation.nodes().resolveNode(request.getCurrentNode());
|
||||
// the primary is assigned to a node other than the node specified in the request
|
||||
if (primaryNode.getId().equals(foundShard.currentNodeId()) == false) {
|
||||
throw new IllegalStateException("unable to find primary shard assigned to node [" + request.getCurrentNode() + "]");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If looking for a replica, go through all the replica shards
|
||||
List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards();
|
||||
if (request.getCurrentNode() != null) {
|
||||
// the request is to explain a replica shard already assigned on a particular node,
|
||||
// so find that shard copy
|
||||
DiscoveryNode replicaNode = allocation.nodes().resolveNode(request.getCurrentNode());
|
||||
for (ShardRouting replica : replicaShardRoutings) {
|
||||
if (replicaNode.getId().equals(replica.currentNodeId())) {
|
||||
foundShard = replica;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (foundShard == null) {
|
||||
throw new IllegalStateException("unable to find a replica shard assigned to node [" +
|
||||
request.getCurrentNode() + "]");
|
||||
}
|
||||
} else {
|
||||
if (replicaShardRoutings.size() > 0) {
|
||||
// Pick the first replica at the very least
|
||||
foundShard = replicaShardRoutings.get(0);
|
||||
for (ShardRouting replica : replicaShardRoutings) {
|
||||
// In case there are multiple replicas where some are assigned and some aren't,
|
||||
// try to find one that is unassigned at least
|
||||
for (ShardRouting replica : replicaShardRoutings) {
|
||||
if (replica.unassigned()) {
|
||||
foundShard = replica;
|
||||
break;
|
||||
} else if (replica.started() && (foundShard.initializing() || foundShard.relocating())) {
|
||||
// prefer started shards to initializing or relocating shards because started shards
|
||||
// can be explained
|
||||
foundShard = replica;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -312,34 +193,8 @@ public class TransportClusterAllocationExplainAction
|
|||
}
|
||||
|
||||
if (foundShard == null) {
|
||||
listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request));
|
||||
return;
|
||||
throw new IllegalStateException("unable to find any shards to explain [" + request + "] in the routing table");
|
||||
}
|
||||
final ShardRouting shardRouting = foundShard;
|
||||
logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
|
||||
|
||||
getShardStores(shardRouting, new ActionListener<IndicesShardStoresResponse>() {
|
||||
@Override
|
||||
public void onResponse(IndicesShardStoresResponse shardStoreResponse) {
|
||||
ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses =
|
||||
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
|
||||
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
|
||||
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
|
||||
request.includeDiskInfo() ? clusterInfo : null);
|
||||
listener.onResponse(new ClusterAllocationExplainResponse(cae));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private void getShardStores(ShardRouting shard, final ActionListener<IndicesShardStoresResponse> listener) {
|
||||
IndicesShardStoresRequest request = new IndicesShardStoresRequest(shard.getIndexName());
|
||||
request.shardStatuses("all");
|
||||
shardStoresAction.execute(request, listener);
|
||||
return foundShard;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.action.support.ActionFilters;
|
|||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
|
||||
import org.elasticsearch.cluster.LocalClusterUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
|
@ -85,6 +86,28 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||
protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
|
||||
if (request.waitForEvents() != null) {
|
||||
final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
|
||||
if (request.local()) {
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new LocalClusterUpdateTask(request.waitForEvents()) {
|
||||
@Override
|
||||
public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
|
||||
return unchanged();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime()));
|
||||
final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis);
|
||||
request.timeout(newTimeout);
|
||||
executeHealth(request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Exception e) {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
|
@ -110,12 +133,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
|
||||
listener.onFailure(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
return !request.local();
|
||||
}
|
||||
});
|
||||
}
|
||||
} else {
|
||||
executeHealth(request, listener);
|
||||
}
|
||||
|
|
|
@ -59,7 +59,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse {
|
|||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
state = ClusterState.Builder.readFrom(in, null);
|
||||
state = ClusterState.readFrom(in, null);
|
||||
readAcknowledged(in);
|
||||
explanations = RoutingExplanations.readFrom(in);
|
||||
}
|
||||
|
|
|
@ -75,6 +75,6 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction<Del
|
|||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
}, false);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ public class ClusterStateResponse extends ActionResponse {
|
|||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
clusterName = new ClusterName(in);
|
||||
clusterState = ClusterState.Builder.readFrom(in, null);
|
||||
clusterState = ClusterState.readFrom(in, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -57,7 +57,7 @@ public class GetAliasesResponse extends ActionResponse {
|
|||
int valueSize = in.readVInt();
|
||||
List<AliasMetaData> value = new ArrayList<>(valueSize);
|
||||
for (int j = 0; j < valueSize; j++) {
|
||||
value.add(AliasMetaData.Builder.readFrom(in));
|
||||
value.add(new AliasMetaData(in));
|
||||
}
|
||||
aliasesBuilder.put(key, Collections.unmodifiableList(value));
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ public class GetIndexResponse extends ActionResponse {
|
|||
int valueSize = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, MappingMetaData> mappingEntryBuilder = ImmutableOpenMap.builder();
|
||||
for (int j = 0; j < valueSize; j++) {
|
||||
mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in));
|
||||
mappingEntryBuilder.put(in.readString(), new MappingMetaData(in));
|
||||
}
|
||||
mappingsMapBuilder.put(key, mappingEntryBuilder.build());
|
||||
}
|
||||
|
@ -116,7 +116,7 @@ public class GetIndexResponse extends ActionResponse {
|
|||
int valueSize = in.readVInt();
|
||||
List<AliasMetaData> aliasEntryBuilder = new ArrayList<>();
|
||||
for (int j = 0; j < valueSize; j++) {
|
||||
aliasEntryBuilder.add(AliasMetaData.Builder.readFrom(in));
|
||||
aliasEntryBuilder.add(new AliasMetaData(in));
|
||||
}
|
||||
aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder));
|
||||
}
|
||||
|
|
|
@ -57,7 +57,7 @@ public class GetMappingsResponse extends ActionResponse {
|
|||
int valueSize = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, MappingMetaData> typeMapBuilder = ImmutableOpenMap.builder();
|
||||
for (int j = 0; j < valueSize; j++) {
|
||||
typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in));
|
||||
typeMapBuilder.put(in.readString(), new MappingMetaData(in));
|
||||
}
|
||||
indexMapBuilder.put(key, typeMapBuilder.build());
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont
|
|||
int size = in.readVInt();
|
||||
indexTemplates = new ArrayList<>(size);
|
||||
for (int i = 0 ; i < size ; i++) {
|
||||
indexTemplates.add(0, IndexTemplateMetaData.Builder.readFrom(in));
|
||||
indexTemplates.add(0, IndexTemplateMetaData.readFrom(in));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -349,7 +349,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
|
|||
} else if ("fields".equals(currentFieldName)) {
|
||||
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
|
||||
} else if ("_source".equals(currentFieldName)) {
|
||||
fetchSourceContext = FetchSourceContext.parse(parser);
|
||||
fetchSourceContext = FetchSourceContext.fromXContent(parser);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
|
||||
}
|
||||
|
@ -362,7 +362,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
|
|||
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
|
||||
fetchSourceContext = FetchSourceContext.parse(parser);
|
||||
fetchSourceContext = FetchSourceContext.fromXContent(parser);
|
||||
} else if (token != XContentParser.Token.VALUE_NULL) {
|
||||
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont
|
|||
int size = in.readVInt();
|
||||
pipelines = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
pipelines.add(PipelineConfiguration.readPipelineConfiguration(in));
|
||||
pipelines.add(PipelineConfiguration.readFrom(in));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionRequestValidationException;
|
|||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -199,7 +198,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
|
|||
* "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
|
||||
*/
|
||||
public SearchRequest searchType(String searchType) {
|
||||
return searchType(SearchType.fromString(searchType, ParseFieldMatcher.EMPTY));
|
||||
return searchType(SearchType.fromString(searchType));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
|
||||
/**
|
||||
* Search type represent the manner at which the search operation is executed.
|
||||
*
|
||||
|
@ -91,7 +89,7 @@ public enum SearchType {
|
|||
* one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
|
||||
* "query_then_fetch"/"queryThenFetch" and "query_and_fetch"/"queryAndFetch".
|
||||
*/
|
||||
public static SearchType fromString(String searchType, ParseFieldMatcher parseFieldMatcher) {
|
||||
public static SearchType fromString(String searchType) {
|
||||
if (searchType == null) {
|
||||
return SearchType.DEFAULT;
|
||||
}
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.elasticsearch.action.support.WriteRequest;
|
|||
import org.elasticsearch.action.support.replication.ReplicationRequest;
|
||||
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
|
@ -714,7 +713,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
|
|||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if ("script".equals(currentFieldName)) {
|
||||
script = Script.parse(parser, ParseFieldMatcher.EMPTY);
|
||||
script = Script.parse(parser);
|
||||
} else if ("scripted_upsert".equals(currentFieldName)) {
|
||||
scriptedUpsert = parser.booleanValue();
|
||||
} else if ("upsert".equals(currentFieldName)) {
|
||||
|
@ -740,7 +739,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
|
|||
fields(fields.toArray(new String[fields.size()]));
|
||||
}
|
||||
} else if ("_source".equals(currentFieldName)) {
|
||||
fetchSourceContext = FetchSourceContext.parse(parser);
|
||||
fetchSourceContext = FetchSourceContext.fromXContent(parser);
|
||||
}
|
||||
}
|
||||
if (script != null) {
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.elasticsearch.common.inject.CreationException;
|
|||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.LogConfigurator;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.network.IfConfig;
|
||||
import org.elasticsearch.common.settings.KeyStoreWrapper;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||
|
@ -206,6 +207,9 @@ final class Bootstrap {
|
|||
throw new BootstrapException(e);
|
||||
}
|
||||
|
||||
// Log ifconfig output before SecurityManager is installed
|
||||
IfConfig.logIfNecessary();
|
||||
|
||||
// install SM after natives, shutdown hooks, etc.
|
||||
try {
|
||||
Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.elasticsearch.action.ActionRequest;
|
|||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.client.support.AbstractClient;
|
||||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Injector;
|
||||
|
@ -140,6 +141,7 @@ public abstract class TransportClient extends AbstractClient {
|
|||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(NetworkModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
entries.addAll(ClusterModule.getNamedWriteables());
|
||||
entries.addAll(pluginsService.filterPlugins(Plugin.class).stream()
|
||||
.flatMap(p -> p.getNamedWriteables().stream())
|
||||
.collect(Collectors.toList()));
|
||||
|
|
|
@ -40,12 +40,7 @@ public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffabl
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<T> readDiffFrom(StreamInput in) throws IOException {
|
||||
return new CompleteDiff<>(this, in);
|
||||
}
|
||||
|
||||
public static <T extends Diffable<T>> Diff<T> readDiffFrom(T reader, StreamInput in) throws IOException {
|
||||
public static <T extends Diffable<T>> Diff<T> readDiffFrom(Reader<T> reader, StreamInput in) throws IOException {
|
||||
return new CompleteDiff<T>(reader, in);
|
||||
}
|
||||
|
||||
|
@ -71,9 +66,9 @@ public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffabl
|
|||
/**
|
||||
* Read simple diff from the stream
|
||||
*/
|
||||
public CompleteDiff(Diffable<T> reader, StreamInput in) throws IOException {
|
||||
public CompleteDiff(Reader<T> reader, StreamInput in) throws IOException {
|
||||
if (in.readBoolean()) {
|
||||
this.part = reader.readFrom(in);
|
||||
this.part = reader.read(in);
|
||||
} else {
|
||||
this.part = null;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or
|
||||
* nothing is object remained the same. Comparing to AbstractDiffable, this class also works with NamedWriteables
|
||||
*/
|
||||
public abstract class AbstractNamedDiffable<T extends NamedDiffable<T>> implements Diffable<T>, NamedWriteable {
|
||||
|
||||
@Override
|
||||
public Diff<T> diff(T previousState) {
|
||||
if (this.get().equals(previousState)) {
|
||||
return new CompleteNamedDiff<>(previousState.getWriteableName(), previousState.getMinimalSupportedVersion());
|
||||
} else {
|
||||
return new CompleteNamedDiff<>(get());
|
||||
}
|
||||
}
|
||||
|
||||
public static <T extends NamedDiffable<T>> NamedDiff<T> readDiffFrom(Class<? extends T> tClass, String name, StreamInput in)
|
||||
throws IOException {
|
||||
return new CompleteNamedDiff<>(tClass, name, in);
|
||||
}
|
||||
|
||||
private static class CompleteNamedDiff<T extends NamedDiffable<T>> implements NamedDiff<T> {
|
||||
|
||||
@Nullable
|
||||
private final T part;
|
||||
|
||||
private final String name;
|
||||
|
||||
/**
|
||||
* A non-null value is only required for write operation, if the diff was just read from the stream the version
|
||||
* is unnecessary.
|
||||
*/
|
||||
@Nullable
|
||||
private final Version minimalSupportedVersion;
|
||||
|
||||
/**
|
||||
* Creates simple diff with changes
|
||||
*/
|
||||
public CompleteNamedDiff(T part) {
|
||||
this.part = part;
|
||||
this.name = part.getWriteableName();
|
||||
this.minimalSupportedVersion = part.getMinimalSupportedVersion();
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates simple diff without changes
|
||||
*/
|
||||
public CompleteNamedDiff(String name, Version minimalSupportedVersion) {
|
||||
this.part = null;
|
||||
this.name = name;
|
||||
this.minimalSupportedVersion = minimalSupportedVersion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read simple diff from the stream
|
||||
*/
|
||||
public CompleteNamedDiff(Class<? extends T> tClass, String name, StreamInput in) throws IOException {
|
||||
if (in.readBoolean()) {
|
||||
this.part = in.readNamedWriteable(tClass, name);
|
||||
this.minimalSupportedVersion = part.getMinimalSupportedVersion();
|
||||
} else {
|
||||
this.part = null;
|
||||
this.minimalSupportedVersion = null; // We just read this diff, so it's not going to be written
|
||||
}
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
assert minimalSupportedVersion != null : "shouldn't be called on diff that was de-serialized from the stream";
|
||||
if (part != null) {
|
||||
out.writeBoolean(true);
|
||||
part.writeTo(out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public T apply(T part) {
|
||||
if (this.part != null) {
|
||||
return this.part;
|
||||
} else {
|
||||
return part;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Version getMinimalSupportedVersion() {
|
||||
assert minimalSupportedVersion != null : "shouldn't be called on the diff that was de-serialized from the stream";
|
||||
return minimalSupportedVersion;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public T get() {
|
||||
return (T) this;
|
||||
}
|
||||
|
||||
}
|
|
@ -22,7 +22,9 @@ package org.elasticsearch.cluster;
|
|||
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
|
||||
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
|
||||
import org.elasticsearch.cluster.action.shard.ShardStateAction;
|
||||
import org.elasticsearch.cluster.metadata.IndexGraveyard;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
|
||||
|
@ -30,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
|
|||
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
|
||||
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
|
||||
import org.elasticsearch.cluster.routing.DelayedAllocationService;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
||||
|
@ -52,15 +55,25 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
|
|||
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.io.stream.Writeable.Reader;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.gateway.GatewayAllocator;
|
||||
import org.elasticsearch.ingest.IngestMetadata;
|
||||
import org.elasticsearch.plugins.ClusterPlugin;
|
||||
import org.elasticsearch.script.ScriptMetaData;
|
||||
import org.elasticsearch.tasks.TaskResultsService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
|
@ -94,6 +107,52 @@ public class ClusterModule extends AbstractModule {
|
|||
indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
|
||||
}
|
||||
|
||||
|
||||
public static List<Entry> getNamedWriteables() {
|
||||
List<Entry> entries = new ArrayList<>();
|
||||
// Cluster State
|
||||
registerClusterCustom(entries, SnapshotsInProgress.TYPE, SnapshotsInProgress::new, SnapshotsInProgress::readDiffFrom);
|
||||
registerClusterCustom(entries, RestoreInProgress.TYPE, RestoreInProgress::new, RestoreInProgress::readDiffFrom);
|
||||
registerClusterCustom(entries, SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress::new,
|
||||
SnapshotDeletionsInProgress::readDiffFrom);
|
||||
// Metadata
|
||||
registerMetaDataCustom(entries, RepositoriesMetaData.TYPE, RepositoriesMetaData::new, RepositoriesMetaData::readDiffFrom);
|
||||
registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom);
|
||||
registerMetaDataCustom(entries, ScriptMetaData.TYPE, ScriptMetaData::new, ScriptMetaData::readDiffFrom);
|
||||
registerMetaDataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom);
|
||||
return entries;
|
||||
}
|
||||
|
||||
public static List<NamedXContentRegistry.Entry> getNamedXWriteables() {
|
||||
List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
|
||||
// Metadata
|
||||
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(RepositoriesMetaData.TYPE),
|
||||
RepositoriesMetaData::fromXContent));
|
||||
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IngestMetadata.TYPE),
|
||||
IngestMetadata::fromXContent));
|
||||
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(ScriptMetaData.TYPE),
|
||||
ScriptMetaData::fromXContent));
|
||||
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexGraveyard.TYPE),
|
||||
IndexGraveyard::fromXContent));
|
||||
return entries;
|
||||
}
|
||||
|
||||
private static <T extends ClusterState.Custom> void registerClusterCustom(List<Entry> entries, String name, Reader<? extends T> reader,
|
||||
Reader<NamedDiff> diffReader) {
|
||||
registerCustom(entries, ClusterState.Custom.class, name, reader, diffReader);
|
||||
}
|
||||
|
||||
private static <T extends MetaData.Custom> void registerMetaDataCustom(List<Entry> entries, String name, Reader<? extends T> reader,
|
||||
Reader<NamedDiff> diffReader) {
|
||||
registerCustom(entries, MetaData.Custom.class, name, reader, diffReader);
|
||||
}
|
||||
|
||||
private static <T extends NamedWriteable> void registerCustom(List<Entry> entries, Class<T> category, String name,
|
||||
Reader<? extends T> reader, Reader<NamedDiff> diffReader) {
|
||||
entries.add(new Entry(category, name, reader));
|
||||
entries.add(new Entry(NamedDiff.class, name, diffReader));
|
||||
}
|
||||
|
||||
public IndexNameExpressionResolver getIndexNameExpressionResolver() {
|
||||
return indexNameExpressionResolver;
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.cluster;
|
|||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
@ -38,7 +38,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
|
|||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
|
@ -46,6 +45,8 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -87,36 +88,12 @@ import java.util.Set;
|
|||
*/
|
||||
public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
|
||||
public static final ClusterState PROTO = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
|
||||
public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
|
||||
|
||||
public interface Custom extends Diffable<Custom>, ToXContent {
|
||||
|
||||
String type();
|
||||
public interface Custom extends NamedDiffable<Custom>, ToXContent {
|
||||
}
|
||||
|
||||
private static final Map<String, Custom> customPrototypes = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Register a custom index meta data factory. Make sure to call it from a static block.
|
||||
*/
|
||||
public static void registerPrototype(String type, Custom proto) {
|
||||
customPrototypes.put(type, proto);
|
||||
}
|
||||
|
||||
static {
|
||||
// register non plugin custom parts
|
||||
registerPrototype(SnapshotsInProgress.TYPE, SnapshotsInProgress.PROTO);
|
||||
registerPrototype(RestoreInProgress.TYPE, RestoreInProgress.PROTO);
|
||||
}
|
||||
|
||||
public static <T extends Custom> T lookupPrototype(String type) {
|
||||
@SuppressWarnings("unchecked")
|
||||
T proto = (T) customPrototypes.get(type);
|
||||
if (proto == null) {
|
||||
throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
|
||||
}
|
||||
return proto;
|
||||
}
|
||||
private static final NamedDiffableValueSerializer<Custom> CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class);
|
||||
|
||||
public static final String UNKNOWN_UUID = "_na_";
|
||||
|
||||
|
@ -659,53 +636,39 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
* @param data input bytes
|
||||
* @param localNode used to set the local node in the cluster state.
|
||||
*/
|
||||
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
|
||||
return readFrom(StreamInput.wrap(data), localNode);
|
||||
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode, NamedWriteableRegistry registry) throws IOException {
|
||||
StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), registry);
|
||||
return readFrom(in, localNode);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* @param in input stream
|
||||
* @param localNode used to set the local node in the cluster state. can be null.
|
||||
*/
|
||||
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
|
||||
return PROTO.readFrom(in, localNode);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff diff(ClusterState previousState) {
|
||||
public Diff<ClusterState> diff(ClusterState previousState) {
|
||||
return new ClusterStateDiff(previousState, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<ClusterState> readDiffFrom(StreamInput in) throws IOException {
|
||||
return new ClusterStateDiff(in, this);
|
||||
public static Diff<ClusterState> readDiffFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
|
||||
return new ClusterStateDiff(in, localNode);
|
||||
}
|
||||
|
||||
public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
|
||||
public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
|
||||
ClusterName clusterName = new ClusterName(in);
|
||||
Builder builder = new Builder(clusterName);
|
||||
builder.version = in.readLong();
|
||||
builder.uuid = in.readString();
|
||||
builder.metaData = MetaData.Builder.readFrom(in);
|
||||
builder.routingTable = RoutingTable.Builder.readFrom(in);
|
||||
builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
|
||||
builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
|
||||
builder.metaData = MetaData.readFrom(in);
|
||||
builder.routingTable = RoutingTable.readFrom(in);
|
||||
builder.nodes = DiscoveryNodes.readFrom(in, localNode);
|
||||
builder.blocks = new ClusterBlocks(in);
|
||||
int customSize = in.readVInt();
|
||||
for (int i = 0; i < customSize; i++) {
|
||||
String type = in.readString();
|
||||
Custom customIndexMetaData = lookupPrototype(type).readFrom(in);
|
||||
builder.putCustom(type, customIndexMetaData);
|
||||
Custom customIndexMetaData = in.readNamedWriteable(Custom.class);
|
||||
builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData);
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState readFrom(StreamInput in) throws IOException {
|
||||
return readFrom(in, nodes.getLocalNode());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
clusterName.writeTo(out);
|
||||
|
@ -715,10 +678,18 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
routingTable.writeTo(out);
|
||||
nodes.writeTo(out);
|
||||
blocks.writeTo(out);
|
||||
out.writeVInt(customs.size());
|
||||
for (ObjectObjectCursor<String, Custom> cursor : customs) {
|
||||
out.writeString(cursor.key);
|
||||
cursor.value.writeTo(out);
|
||||
// filter out custom states not supported by the other node
|
||||
int numberOfCustoms = 0;
|
||||
for (ObjectCursor<Custom> cursor : customs.values()) {
|
||||
if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) {
|
||||
numberOfCustoms++;
|
||||
}
|
||||
}
|
||||
out.writeVInt(numberOfCustoms);
|
||||
for (ObjectCursor<Custom> cursor : customs.values()) {
|
||||
if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) {
|
||||
out.writeNamedWriteable(cursor.value);
|
||||
}
|
||||
}
|
||||
}
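
Hedged usage sketch for the registry-aware fromBytes/readFrom path above; the registry entries, the toBytes helper and the clusterState/localNode variables are assumed surrounding context, not part of this diff.

// Sketch only: wire a NamedWriteableRegistry so readNamedWriteable(Custom.class) can
// resolve the custom parts that used to go through registerPrototype/lookupPrototype.
// Assumes the list-of-entries NamedWriteableRegistry constructor.
NamedWriteableRegistry registry = new NamedWriteableRegistry(Arrays.asList(
        new NamedWriteableRegistry.Entry(ClusterState.Custom.class, SnapshotsInProgress.TYPE, SnapshotsInProgress::new),
        new NamedWriteableRegistry.Entry(NamedDiff.class, SnapshotsInProgress.TYPE, SnapshotsInProgress::readDiffFrom)));

byte[] bytes = ClusterState.Builder.toBytes(clusterState);                  // existing serialization helper
ClusterState restored = ClusterState.fromBytes(bytes, localNode, registry); // new signature from this hunk
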
|
||||
|
||||
|
@ -751,30 +722,19 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
nodes = after.nodes.diff(before.nodes);
|
||||
metaData = after.metaData.diff(before.metaData);
|
||||
blocks = after.blocks.diff(before.blocks);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
|
||||
}
|
||||
|
||||
public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
|
||||
public ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException {
|
||||
clusterName = new ClusterName(in);
|
||||
fromUuid = in.readString();
|
||||
toUuid = in.readString();
|
||||
toVersion = in.readLong();
|
||||
routingTable = proto.routingTable.readDiffFrom(in);
|
||||
nodes = proto.nodes.readDiffFrom(in);
|
||||
metaData = proto.metaData.readDiffFrom(in);
|
||||
blocks = proto.blocks.readDiffFrom(in);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototype(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototype(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
routingTable = RoutingTable.readDiffFrom(in);
|
||||
nodes = DiscoveryNodes.readDiffFrom(in, localNode);
|
||||
metaData = MetaData.readDiffFrom(in);
|
||||
blocks = ClusterBlocks.readDiffFrom(in);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -27,10 +29,10 @@ public interface ClusterStateTaskExecutor<T> {
|
|||
* Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state
|
||||
* should be changed.
|
||||
*/
|
||||
BatchResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
|
||||
ClusterTasksResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
* indicates whether this executor should only run if the current node is master
|
||||
*/
|
||||
default boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
|
@ -68,18 +70,22 @@ public interface ClusterStateTaskExecutor<T> {
|
|||
* Represents the result of a batched execution of cluster state update tasks
|
||||
* @param <T> the type of the cluster state update task
|
||||
*/
|
||||
class BatchResult<T> {
|
||||
class ClusterTasksResult<T> {
|
||||
public final boolean noMaster;
|
||||
@Nullable
|
||||
public final ClusterState resultingState;
|
||||
public final Map<T, TaskResult> executionResults;
|
||||
|
||||
/**
|
||||
* Construct an execution result instance with a correspondence between the tasks and their execution result
|
||||
* @param noMaster whether this node steps down as master or has lost connection to the master
|
||||
* @param resultingState the resulting cluster state
|
||||
* @param executionResults the correspondence between tasks and their outcome
|
||||
*/
|
||||
BatchResult(ClusterState resultingState, Map<T, TaskResult> executionResults) {
|
||||
ClusterTasksResult(boolean noMaster, ClusterState resultingState, Map<T, TaskResult> executionResults) {
|
||||
this.resultingState = resultingState;
|
||||
this.executionResults = executionResults;
|
||||
this.noMaster = noMaster;
|
||||
}
|
||||
|
||||
public static <T> Builder<T> builder() {
|
||||
|
@ -117,8 +123,13 @@ public interface ClusterStateTaskExecutor<T> {
|
|||
return this;
|
||||
}
|
||||
|
||||
public BatchResult<T> build(ClusterState resultingState) {
|
||||
return new BatchResult<>(resultingState, executionResults);
|
||||
public ClusterTasksResult<T> build(ClusterState resultingState) {
|
||||
return new ClusterTasksResult<>(false, resultingState, executionResults);
|
||||
}
|
||||
|
||||
ClusterTasksResult<T> build(ClusterTasksResult<T> result, ClusterState previousState) {
|
||||
return new ClusterTasksResult<>(result.noMaster, result.resultingState == null ? previousState : result.resultingState,
|
||||
executionResults);
|
||||
}
|
||||
}
|
||||
}
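
A minimal sketch of an executor written against the renamed ClusterTasksResult API; NoopTask is a placeholder type, not something introduced by this commit.

// BatchResult is now ClusterTasksResult, and the builder gains the noMaster-aware
// build(result, previousState) overload shown above.
class NoopTask {}

class NoopExecutor implements ClusterStateTaskExecutor<NoopTask> {
    @Override
    public ClusterTasksResult<NoopTask> execute(ClusterState currentState, List<NoopTask> tasks) {
        // returning the same ClusterState instance signals "no change to publish"
        return ClusterTasksResult.<NoopTask>builder().successes(tasks).build(currentState);
    }
}
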
|
||||
|
|
|
@ -41,9 +41,9 @@ public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig,
|
|||
}
|
||||
|
||||
@Override
|
||||
public final BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
|
||||
public final ClusterTasksResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
|
||||
ClusterState result = execute(currentState);
|
||||
return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
|
||||
return ClusterTasksResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -75,4 +75,13 @@ public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig,
|
|||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
|
||||
/**
|
||||
* Marked as final as cluster state update tasks should only run on master.
|
||||
* For local requests, use {@link LocalClusterUpdateTask} instead.
|
||||
*/
|
||||
@Override
|
||||
public final boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
}
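
Illustrative usage, assuming the existing ClusterService.submitStateUpdateTask entry point: master-only updates keep extending ClusterStateUpdateTask (runOnlyOnMaster() is now final), while local-only work moves to the new LocalClusterUpdateTask.

clusterService.submitStateUpdateTask("example-source", new ClusterStateUpdateTask() {
    @Override
    public ClusterState execute(ClusterState currentState) {
        return currentState; // same instance: nothing to publish
    }

    @Override
    public void onFailure(String source, Exception e) {
        // handle the failure (from the ClusterStateTaskListener contract)
    }
});
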
|
||||
|
|
|
@ -34,13 +34,4 @@ public interface Diffable<T> extends Writeable {
|
|||
*/
|
||||
Diff<T> diff(T previousState);
|
||||
|
||||
/**
|
||||
* Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput
|
||||
*/
|
||||
Diff<T> readDiffFrom(StreamInput in) throws IOException;
|
||||
|
||||
/**
|
||||
* Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
|
||||
*/
|
||||
T readFrom(StreamInput in) throws IOException;
|
||||
}
|
||||
|
|
|
@ -23,10 +23,12 @@ import com.carrotsearch.hppc.cursors.IntCursor;
|
|||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable.Reader;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@ -74,7 +76,7 @@ public final class DiffableUtils {
|
|||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
@ -90,7 +92,7 @@ public final class DiffableUtils {
|
|||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
|
||||
*/
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
@ -106,7 +108,7 @@ public final class DiffableUtils {
|
|||
/**
|
||||
* Calculates diff between two Maps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
@ -135,22 +137,22 @@ public final class DiffableUtils {
|
|||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, Reader<T> reader, Reader<Diff<T>> diffReader) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader));
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, Reader<T> reader, Reader<Diff<T>> diffReader) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader));
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, Reader<T> reader, Reader<Diff<T>> diffReader) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -214,12 +216,17 @@ public final class DiffableUtils {
|
|||
*
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
|
||||
public static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
|
||||
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
private ImmutableOpenMapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer,
|
||||
List<K> deletes, Map<K, Diff<T>> diffs, Map<K, T> upserts) {
|
||||
super(keySerializer, valueSerializer, deletes, diffs, upserts);
|
||||
}
|
||||
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
|
@ -245,6 +252,21 @@ public final class DiffableUtils {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new diff map with the given key removed; it does not modify the invoking instance.
|
||||
* If the key does not exist in the diff map, the same instance is returned.
|
||||
*/
|
||||
public ImmutableOpenMapDiff<K, T> withKeyRemoved(K key) {
|
||||
if (this.diffs.containsKey(key) == false && this.upserts.containsKey(key) == false) {
|
||||
return this;
|
||||
}
|
||||
Map<K, Diff<T>> newDiffs = new HashMap<>(this.diffs);
|
||||
newDiffs.remove(key);
|
||||
Map<K, T> newUpserts = new HashMap<>(this.upserts);
|
||||
newUpserts.remove(key);
|
||||
return new ImmutableOpenMapDiff<>(this.keySerializer, this.valueSerializer, this.deletes, newDiffs, newUpserts);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
|
||||
ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
|
||||
|
@ -346,6 +368,15 @@ public final class DiffableUtils {
|
|||
upserts = new HashMap<>();
|
||||
}
|
||||
|
||||
protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer,
|
||||
List<K> deletes, Map<K, Diff<T>> diffs, Map<K, T> upserts) {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
this.deletes = deletes;
|
||||
this.diffs = diffs;
|
||||
this.upserts = upserts;
|
||||
}
|
||||
|
||||
protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
|
@ -406,12 +437,29 @@ public final class DiffableUtils {
|
|||
for (K delete : deletes) {
|
||||
keySerializer.writeKey(delete, out);
|
||||
}
|
||||
out.writeVInt(diffs.size());
|
||||
Version version = out.getVersion();
|
||||
// filter out custom states not supported by the other node
|
||||
int diffCount = 0;
|
||||
for (Diff<T> diff : diffs.values()) {
|
||||
if(valueSerializer.supportsVersion(diff, version)) {
|
||||
diffCount++;
|
||||
}
|
||||
}
|
||||
out.writeVInt(diffCount);
|
||||
for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
|
||||
if(valueSerializer.supportsVersion(entry.getValue(), version)) {
|
||||
keySerializer.writeKey(entry.getKey(), out);
|
||||
valueSerializer.writeDiff(entry.getValue(), out);
|
||||
}
|
||||
out.writeVInt(upserts.size());
|
||||
}
|
||||
// filter out custom states not supported by the other node
|
||||
int upsertsCount = 0;
|
||||
for (T upsert : upserts.values()) {
|
||||
if(valueSerializer.supportsVersion(upsert, version)) {
|
||||
upsertsCount++;
|
||||
}
|
||||
}
|
||||
out.writeVInt(upsertsCount);
|
||||
for (Map.Entry<K, T> entry : upserts.entrySet()) {
|
||||
keySerializer.writeKey(entry.getKey(), out);
|
||||
valueSerializer.write(entry.getValue(), out);
|
||||
|
@ -511,6 +559,20 @@ public final class DiffableUtils {
|
|||
*/
|
||||
boolean supportsDiffableValues();
|
||||
|
||||
/**
|
||||
* Whether this serializer supports the version of the output stream
|
||||
*/
|
||||
default boolean supportsVersion(Diff<V> value, Version version) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether this serializer supports the version of the output stream
|
||||
*/
|
||||
default boolean supportsVersion(V value, Version version) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes diff if this serializer supports diffable values
|
||||
*/
|
||||
|
@ -600,25 +662,27 @@ public final class DiffableUtils {
|
|||
}
|
||||
|
||||
/**
|
||||
* Implementation of the ValueSerializer that uses a prototype object for reading operations
|
||||
* Implementation of the ValueSerializer that wraps value and diff readers.
|
||||
*
|
||||
* Note: this implementation ignores the key.
|
||||
*/
|
||||
public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
|
||||
private final V proto;
|
||||
public static class DiffableValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
|
||||
private final Reader<V> reader;
|
||||
private final Reader<Diff<V>> diffReader;
|
||||
|
||||
public DiffablePrototypeValueReader(V proto) {
|
||||
this.proto = proto;
|
||||
public DiffableValueReader(Reader<V> reader, Reader<Diff<V>> diffReader) {
|
||||
this.reader = reader;
|
||||
this.diffReader = diffReader;
|
||||
}
|
||||
|
||||
@Override
|
||||
public V read(StreamInput in, K key) throws IOException {
|
||||
return proto.readFrom(in);
|
||||
return reader.read(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
|
||||
return proto.readDiffFrom(in);
|
||||
return diffReader.read(in);
|
||||
}
|
||||
}
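
Hedged sketch of the reader-based entry point that replaces the prototype variant above; MyValue is a placeholder for any Diffable with a StreamInput constructor and a static readDiffFrom(StreamInput), as introduced for the real classes in this commit.

// `in` is a StreamInput, `previousMap` the map the diff was computed against.
DiffableUtils.MapDiff<String, MyValue, ImmutableOpenMap<String, MyValue>> mapDiff =
        DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
                MyValue::new, MyValue::readDiffFrom);
ImmutableOpenMap<String, MyValue> updated = mapDiff.apply(previousMap);
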
|
||||
|
||||
|
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Used to apply state updates on nodes that are not necessarily master
|
||||
*/
|
||||
public abstract class LocalClusterUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<LocalClusterUpdateTask>,
|
||||
ClusterStateTaskListener {
|
||||
|
||||
private final Priority priority;
|
||||
|
||||
public LocalClusterUpdateTask() {
|
||||
this(Priority.NORMAL);
|
||||
}
|
||||
|
||||
public LocalClusterUpdateTask(Priority priority) {
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
public abstract ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) throws Exception;
|
||||
|
||||
@Override
|
||||
public final ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState,
|
||||
List<LocalClusterUpdateTask> tasks) throws Exception {
|
||||
assert tasks.size() == 1 && tasks.get(0) == this : "expected one-element task list containing current object but was " + tasks;
|
||||
ClusterTasksResult<LocalClusterUpdateTask> result = execute(currentState);
|
||||
return ClusterTasksResult.<LocalClusterUpdateTask>builder().successes(tasks).build(result, currentState);
|
||||
}
|
||||
|
||||
/**
|
||||
* node stepped down as master or has lost connection to the master
|
||||
*/
|
||||
public static ClusterTasksResult<LocalClusterUpdateTask> noMaster() {
|
||||
return new ClusterTasksResult(true, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* no changes were made to the cluster state. Useful to execute a runnable on the cluster state applier thread
|
||||
*/
|
||||
public static ClusterTasksResult<LocalClusterUpdateTask> unchanged() {
|
||||
return new ClusterTasksResult(false, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* locally apply cluster state received from a master
|
||||
*/
|
||||
public static ClusterTasksResult<LocalClusterUpdateTask> newState(ClusterState clusterState) {
|
||||
return new ClusterTasksResult(false, clusterState, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String describeTasks(List<LocalClusterUpdateTask> tasks) {
|
||||
return ""; // one of task, source is enough
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public TimeValue timeout() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean runOnlyOnMaster() {
|
||||
return false;
|
||||
}
|
||||
}
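
Illustrative only: a non-master task built on the helpers above; onFailure comes from the ClusterStateTaskListener contract this class implements.

LocalClusterUpdateTask task = new LocalClusterUpdateTask() {
    @Override
    public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
        // run on the cluster state applier thread without producing a new cluster state
        return unchanged();
    }

    @Override
    public void onFailure(String source, Exception e) {
        // handle the failure
    }
};
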
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
|
||||
/**
* Diff that also supports the NamedWriteable interface
*/
public interface NamedDiff<T extends Diffable<T>> extends Diff<T>, NamedWriteable {
/**
* The minimal version of the recipient this custom object can be sent to
*/
default Version getMinimalSupportedVersion() {
return Version.CURRENT.minimumCompatibilityVersion();
}

}
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
|
||||
/**
* Diffable that also supports the NamedWriteable interface
*/
public interface NamedDiffable<T> extends Diffable<T>, NamedWriteable {
/**
* The minimal version of the recipient this custom object can be sent to
*/
default Version getMinimalSupportedVersion() {
return Version.CURRENT.minimumCompatibilityVersion();
}
}
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Value Serializer for named diffables
|
||||
*/
|
||||
public class NamedDiffableValueSerializer<T extends NamedDiffable<T>> extends DiffableUtils.DiffableValueSerializer<String, T> {
|
||||
|
||||
private final Class<T> tClass;
|
||||
|
||||
public NamedDiffableValueSerializer(Class<T> tClass) {
|
||||
this.tClass = tClass;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T read(StreamInput in, String key) throws IOException {
|
||||
return in.readNamedWriteable(tClass, key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supportsVersion(Diff<T> value, Version version) {
|
||||
return version.onOrAfter(((NamedDiff<?>)value).getMinimalSupportedVersion());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supportsVersion(T value, Version version) {
|
||||
return version.onOrAfter(value.getMinimalSupportedVersion());
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public Diff<T> readDiff(StreamInput in, String key) throws IOException {
|
||||
return in.readNamedWriteable(NamedDiff.class, key);
|
||||
}
|
||||
}
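
Sketch of how this serializer plugs into DiffableUtils, mirroring the CUSTOM_VALUE_SERIALIZER usage in the ClusterState hunks earlier in this commit; `before` and `after` are assumed ClusterState instances.

NamedDiffableValueSerializer<ClusterState.Custom> serializer =
        new NamedDiffableValueSerializer<>(ClusterState.Custom.class);
Diff<ImmutableOpenMap<String, ClusterState.Custom>> customsDiff =
        DiffableUtils.diff(before.customs(), after.customs(),
                DiffableUtils.getStringKeySerializer(), serializer);
// the receiving side reads it back with the same serializer:
// DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), serializer)
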
|
|
@ -39,12 +39,10 @@ import java.util.Objects;
|
|||
/**
|
||||
* Meta data about restore processes that are currently executing
|
||||
*/
|
||||
public class RestoreInProgress extends AbstractDiffable<Custom> implements Custom {
|
||||
public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements Custom {
|
||||
|
||||
public static final String TYPE = "restore";
|
||||
|
||||
public static final RestoreInProgress PROTO = new RestoreInProgress();
|
||||
|
||||
private final List<Entry> entries;
|
||||
|
||||
/**
|
||||
|
@ -377,15 +375,15 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
|
|||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public String type() {
|
||||
public String getWriteableName() {
|
||||
return TYPE;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public RestoreInProgress readFrom(StreamInput in) throws IOException {
|
||||
public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
|
||||
return readDiffFrom(Custom.class, TYPE, in);
|
||||
}
|
||||
|
||||
public RestoreInProgress(StreamInput in) throws IOException {
|
||||
Entry[] entries = new Entry[in.readVInt()];
|
||||
for (int i = 0; i < entries.length; i++) {
|
||||
Snapshot snapshot = new Snapshot(in);
|
||||
|
@ -404,7 +402,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
|
|||
}
|
||||
entries[i] = new Entry(snapshot, state, Collections.unmodifiableList(indexBuilder), builder.build());
|
||||
}
|
||||
return new RestoreInProgress(entries);
|
||||
this.entries = Arrays.asList(entries);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -0,0 +1,220 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState.Custom;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.snapshots.Snapshot;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A class that represents the snapshot deletions that are in progress in the cluster.
|
||||
*/
|
||||
public class SnapshotDeletionsInProgress extends AbstractNamedDiffable<Custom> implements Custom {
|
||||
|
||||
public static final String TYPE = "snapshot_deletions";
|
||||
// the version where SnapshotDeletionsInProgress was introduced
|
||||
public static final Version VERSION_INTRODUCED = Version.V_5_2_0_UNRELEASED;
|
||||
|
||||
// the list of snapshot deletion request entries
|
||||
private final List<Entry> entries;
|
||||
|
||||
private SnapshotDeletionsInProgress(List<Entry> entries) {
|
||||
this.entries = Collections.unmodifiableList(entries);
|
||||
}
|
||||
|
||||
public SnapshotDeletionsInProgress(StreamInput in) throws IOException {
|
||||
this.entries = Collections.unmodifiableList(in.readList(Entry::new));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new instance of {@link SnapshotDeletionsInProgress} with the given
|
||||
* {@link Entry} added.
|
||||
*/
|
||||
public static SnapshotDeletionsInProgress newInstance(Entry entry) {
|
||||
return new SnapshotDeletionsInProgress(Collections.singletonList(entry));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new instance of {@link SnapshotDeletionsInProgress} which adds
|
||||
* the given {@link Entry} to the invoking instance.
|
||||
*/
|
||||
public SnapshotDeletionsInProgress withAddedEntry(Entry entry) {
|
||||
List<Entry> entries = new ArrayList<>(getEntries());
|
||||
entries.add(entry);
|
||||
return new SnapshotDeletionsInProgress(entries);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new instance of {@link SnapshotDeletionsInProgress} which removes
|
||||
* the given entry from the invoking instance.
|
||||
*/
|
||||
public SnapshotDeletionsInProgress withRemovedEntry(Entry entry) {
|
||||
List<Entry> entries = new ArrayList<>(getEntries());
|
||||
entries.remove(entry);
|
||||
return new SnapshotDeletionsInProgress(entries);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an unmodifiable list of snapshot deletion entries.
|
||||
*/
|
||||
public List<Entry> getEntries() {
|
||||
return entries;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns {@code true} if there are snapshot deletions in progress in the cluster,
|
||||
* returns {@code false} otherwise.
|
||||
*/
|
||||
public boolean hasDeletionsInProgress() {
|
||||
return entries.isEmpty() == false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return TYPE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
SnapshotDeletionsInProgress that = (SnapshotDeletionsInProgress) o;
|
||||
return entries.equals(that.entries);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 31 + entries.hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeList(entries);
|
||||
}
|
||||
|
||||
public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
|
||||
return readDiffFrom(Custom.class, TYPE, in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Version getMinimalSupportedVersion() {
|
||||
return VERSION_INTRODUCED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(TYPE);
|
||||
for (Entry entry : entries) {
|
||||
builder.startObject();
|
||||
{
|
||||
builder.field("repository", entry.snapshot.getRepository());
|
||||
builder.field("snapshot", entry.snapshot.getSnapshotId().getName());
|
||||
builder.timeValueField("start_time_millis", "start_time", entry.startTime);
|
||||
builder.field("repository_state_id", entry.repositoryStateId);
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endArray();
|
||||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* A class representing a snapshot deletion request entry in the cluster state.
|
||||
*/
|
||||
public static final class Entry implements Writeable {
|
||||
private final Snapshot snapshot;
|
||||
private final long startTime;
|
||||
private final long repositoryStateId;
|
||||
|
||||
public Entry(Snapshot snapshot, long startTime, long repositoryStateId) {
|
||||
this.snapshot = snapshot;
|
||||
this.startTime = startTime;
|
||||
this.repositoryStateId = repositoryStateId;
|
||||
}
|
||||
|
||||
public Entry(StreamInput in) throws IOException {
|
||||
this.snapshot = new Snapshot(in);
|
||||
this.startTime = in.readVLong();
|
||||
this.repositoryStateId = in.readLong();
|
||||
}
|
||||
|
||||
/**
|
||||
* The snapshot to delete.
|
||||
*/
|
||||
public Snapshot getSnapshot() {
|
||||
return snapshot;
|
||||
}
|
||||
|
||||
/**
|
||||
* The start time in milliseconds for deleting the snapshots.
|
||||
*/
|
||||
public long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* The repository state id at the time the snapshot deletion began.
|
||||
*/
|
||||
public long getRepositoryStateId() {
|
||||
return repositoryStateId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
Entry that = (Entry) o;
|
||||
return snapshot.equals(that.snapshot)
|
||||
&& startTime == that.startTime
|
||||
&& repositoryStateId == that.repositoryStateId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(snapshot, startTime, repositoryStateId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
snapshot.writeTo(out);
|
||||
out.writeVLong(startTime);
|
||||
out.writeLong(repositoryStateId);
|
||||
}
|
||||
}
|
||||
}
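
Illustrative: recording a deletion entry in the cluster state via the new custom; `snapshot`, `repositoryStateId` and `currentState` are assumed to come from the caller.

SnapshotDeletionsInProgress.Entry entry = new SnapshotDeletionsInProgress.Entry(
        snapshot, System.currentTimeMillis(), repositoryStateId);
SnapshotDeletionsInProgress deletions = SnapshotDeletionsInProgress.newInstance(entry);
ClusterState updated = ClusterState.builder(currentState)
        .putCustom(SnapshotDeletionsInProgress.TYPE, deletions)
        .build();
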
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.cluster;
|
|||
import com.carrotsearch.hppc.ObjectContainer;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState.Custom;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -43,10 +44,14 @@ import java.util.Map;
|
|||
/**
|
||||
* Meta data about snapshots that are currently executing
|
||||
*/
|
||||
public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Custom {
|
||||
public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implements Custom {
|
||||
public static final String TYPE = "snapshots";
|
||||
|
||||
public static final SnapshotsInProgress PROTO = new SnapshotsInProgress();
|
||||
// denotes an undefined repository state id, which will happen when receiving a cluster state with
|
||||
// a snapshot in progress from a pre 5.2.x node
|
||||
public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L;
|
||||
// the version where repository state ids were introduced
|
||||
private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0_UNRELEASED;
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
|
@ -74,9 +79,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
private final List<IndexId> indices;
|
||||
private final ImmutableOpenMap<String, List<ShardId>> waitingIndices;
|
||||
private final long startTime;
|
||||
private final long repositoryStateId;
|
||||
|
||||
public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List<IndexId> indices,
|
||||
long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
|
||||
long startTime, long repositoryStateId, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
|
||||
this.state = state;
|
||||
this.snapshot = snapshot;
|
||||
this.includeGlobalState = includeGlobalState;
|
||||
|
@ -90,10 +96,12 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
this.shards = shards;
|
||||
this.waitingIndices = findWaitingIndices(shards);
|
||||
}
|
||||
this.repositoryStateId = repositoryStateId;
|
||||
}
|
||||
|
||||
public Entry(Entry entry, State state, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
|
||||
this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
|
||||
this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime,
|
||||
entry.repositoryStateId, shards);
|
||||
}
|
||||
|
||||
public Entry(Entry entry, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
|
||||
|
@ -132,6 +140,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
return startTime;
|
||||
}
|
||||
|
||||
public long getRepositoryStateId() {
|
||||
return repositoryStateId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
|
@ -147,6 +159,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
if (!snapshot.equals(entry.snapshot)) return false;
|
||||
if (state != entry.state) return false;
|
||||
if (!waitingIndices.equals(entry.waitingIndices)) return false;
|
||||
if (repositoryStateId != entry.repositoryStateId) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -161,6 +174,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
result = 31 * result + indices.hashCode();
|
||||
result = 31 * result + waitingIndices.hashCode();
|
||||
result = 31 * result + Long.hashCode(startTime);
|
||||
result = 31 * result + Long.hashCode(repositoryStateId);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -361,12 +375,15 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
}
|
||||
|
||||
@Override
|
||||
public String type() {
|
||||
public String getWriteableName() {
|
||||
return TYPE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SnapshotsInProgress readFrom(StreamInput in) throws IOException {
|
||||
public static NamedDiff<Custom> readDiffFrom(StreamInput in) throws IOException {
|
||||
return readDiffFrom(Custom.class, TYPE, in);
|
||||
}
|
||||
|
||||
public SnapshotsInProgress(StreamInput in) throws IOException {
|
||||
Entry[] entries = new Entry[in.readVInt()];
|
||||
for (int i = 0; i < entries.length; i++) {
|
||||
Snapshot snapshot = new Snapshot(in);
|
||||
|
@ -387,15 +404,20 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
State shardState = State.fromValue(in.readByte());
|
||||
builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
|
||||
}
|
||||
long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID;
|
||||
if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) {
|
||||
repositoryStateId = in.readLong();
|
||||
}
|
||||
entries[i] = new Entry(snapshot,
|
||||
includeGlobalState,
|
||||
partial,
|
||||
state,
|
||||
Collections.unmodifiableList(indexBuilder),
|
||||
startTime,
|
||||
repositoryStateId,
|
||||
builder.build());
|
||||
}
|
||||
return new SnapshotsInProgress(entries);
|
||||
this.entries = Arrays.asList(entries);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -417,6 +439,9 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
out.writeOptionalString(shardEntry.value.nodeId());
|
||||
out.writeByte(shardEntry.value.state().value());
|
||||
}
|
||||
if (out.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) {
|
||||
out.writeLong(entry.repositoryStateId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -430,6 +455,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
private static final String INDICES = "indices";
|
||||
private static final String START_TIME_MILLIS = "start_time_millis";
|
||||
private static final String START_TIME = "start_time";
|
||||
private static final String REPOSITORY_STATE_ID = "repository_state_id";
|
||||
private static final String SHARDS = "shards";
|
||||
private static final String INDEX = "index";
|
||||
private static final String SHARD = "shard";
|
||||
|
@ -461,6 +487,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
|
|||
}
|
||||
builder.endArray();
|
||||
builder.timeValueField(START_TIME_MILLIS, START_TIME, entry.startTime());
|
||||
builder.field(REPOSITORY_STATE_ID, entry.getRepositoryStateId());
|
||||
builder.startArray(SHARDS);
|
||||
{
|
||||
for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : entry.shards) {
|
||||
|
|
|
@ -25,10 +25,10 @@ import org.apache.logging.log4j.util.Supplier;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskConfig;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.MasterNodeChangePredicate;
|
||||
import org.elasticsearch.cluster.NotMasterException;
|
||||
|
@ -260,8 +260,8 @@ public class ShardStateAction extends AbstractComponent {
|
|||
}
|
||||
|
||||
@Override
|
||||
public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardEntry> batchResultBuilder = BatchResult.builder();
|
||||
public ClusterTasksResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
|
||||
ClusterTasksResult.Builder<ShardEntry> batchResultBuilder = ClusterTasksResult.builder();
|
||||
List<ShardEntry> tasksToBeApplied = new ArrayList<>();
|
||||
List<FailedShard> failedShardsToBeApplied = new ArrayList<>();
|
||||
List<StaleShard> staleShardsToBeApplied = new ArrayList<>();
|
||||
|
@ -394,8 +394,8 @@ public class ShardStateAction extends AbstractComponent {
|
|||
}
|
||||
|
||||
@Override
|
||||
public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardEntry> builder = BatchResult.builder();
|
||||
public ClusterTasksResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
|
||||
ClusterTasksResult.Builder<ShardEntry> builder = ClusterTasksResult.builder();
|
||||
List<ShardEntry> tasksToBeApplied = new ArrayList<>();
|
||||
List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
Set<ShardRouting> seenShardRoutings = new HashSet<>(); // to prevent duplicates
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.cluster.block;
|
|||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.cluster.AbstractDiffable;
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
|
@ -48,8 +49,6 @@ import static java.util.stream.Stream.concat;
|
|||
public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
||||
public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), ImmutableOpenMap.of());
|
||||
|
||||
public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK;
|
||||
|
||||
private final Set<ClusterBlock> global;
|
||||
|
||||
private final ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks;
|
||||
|
@ -59,23 +58,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
|||
ClusterBlocks(Set<ClusterBlock> global, ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
|
||||
this.global = global;
|
||||
this.indicesBlocks = indicesBlocks;
|
||||
|
||||
levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
|
||||
for (final ClusterBlockLevel level : ClusterBlockLevel.values()) {
|
||||
Predicate<ClusterBlock> containsLevel = block -> block.contains(level);
|
||||
Set<ClusterBlock> newGlobal = unmodifiableSet(global.stream()
|
||||
.filter(containsLevel)
|
||||
.collect(toSet()));
|
||||
|
||||
ImmutableOpenMap.Builder<String, Set<ClusterBlock>> indicesBuilder = ImmutableOpenMap.builder();
|
||||
for (ObjectObjectCursor<String, Set<ClusterBlock>> entry : indicesBlocks) {
|
||||
indicesBuilder.put(entry.key, unmodifiableSet(entry.value.stream()
|
||||
.filter(containsLevel)
|
||||
.collect(toSet())));
|
||||
}
|
||||
|
||||
levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
|
||||
}
|
||||
levelHolders = generateLevelHolders(global, indicesBlocks);
|
||||
}
|
||||
|
||||
public Set<ClusterBlock> global() {
|
||||
|
@ -98,6 +81,27 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
|||
return indices(level).getOrDefault(index, emptySet());
|
||||
}
|
||||
|
||||
private static ImmutableLevelHolder[] generateLevelHolders(Set<ClusterBlock> global,
|
||||
ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
|
||||
ImmutableLevelHolder[] levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
|
||||
for (final ClusterBlockLevel level : ClusterBlockLevel.values()) {
|
||||
Predicate<ClusterBlock> containsLevel = block -> block.contains(level);
|
||||
Set<ClusterBlock> newGlobal = unmodifiableSet(global.stream()
|
||||
.filter(containsLevel)
|
||||
.collect(toSet()));
|
||||
|
||||
ImmutableOpenMap.Builder<String, Set<ClusterBlock>> indicesBuilder = ImmutableOpenMap.builder();
|
||||
for (ObjectObjectCursor<String, Set<ClusterBlock>> entry : indicesBlocks) {
|
||||
indicesBuilder.put(entry.key, unmodifiableSet(entry.value.stream()
|
||||
.filter(containsLevel)
|
||||
.collect(toSet())));
|
||||
}
|
||||
|
||||
levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
|
||||
}
|
||||
return levelHolders;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <tt>true</tt> if one of the global blocks has its disable state persistence flag set.
|
||||
*/
|
||||
|
@ -239,15 +243,16 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterBlocks readFrom(StreamInput in) throws IOException {
|
||||
public ClusterBlocks(StreamInput in) throws IOException {
|
||||
Set<ClusterBlock> global = readBlockSet(in);
|
||||
int size = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, Set<ClusterBlock>> indicesBuilder = ImmutableOpenMap.builder(size);
|
||||
for (int j = 0; j < size; j++) {
|
||||
indicesBuilder.put(in.readString().intern(), readBlockSet(in));
|
||||
}
|
||||
return new ClusterBlocks(global, indicesBuilder.build());
|
||||
this.global = global;
|
||||
this.indicesBlocks = indicesBuilder.build();
|
||||
levelHolders = generateLevelHolders(global, indicesBlocks);
|
||||
}
|
||||
|
||||
private static Set<ClusterBlock> readBlockSet(StreamInput in) throws IOException {
|
||||
|
@ -259,6 +264,10 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
|||
return unmodifiableSet(blocks);
|
||||
}
|
||||
|
||||
public static Diff<ClusterBlocks> readDiffFrom(StreamInput in) throws IOException {
|
||||
return AbstractDiffable.readDiffFrom(ClusterBlocks::new, in);
|
||||
}
|
||||
|
||||
static class ImmutableLevelHolder {
|
||||
|
||||
static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(emptySet(), ImmutableOpenMap.of());
|
||||
|
@ -383,9 +392,5 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
|||
}
|
||||
return new ClusterBlocks(unmodifiableSet(new HashSet<>(global)), indicesBuilder.build());
|
||||
}
|
||||
|
||||
public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException {
|
||||
return PROTO.readFrom(in);
|
||||
}
|
||||
}
|
||||
}
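
The migration pattern this commit applies repeatedly (ClusterBlocks here, AliasMetaData and IndexGraveyard below): drop the PROTO/readFrom pair in favour of a StreamInput constructor plus a static readDiffFrom built on a Writeable.Reader. The Example class is hypothetical, shown only to isolate the shape.

public class Example extends AbstractDiffable<Example> {
    private final String value;

    public Example(StreamInput in) throws IOException {
        this.value = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }

    public static Diff<Example> readDiffFrom(StreamInput in) throws IOException {
        // static helper shown in this diff: AbstractDiffable.readDiffFrom(Reader, StreamInput)
        return readDiffFrom(Example::new, in);
    }
}
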
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata;
|
|||
|
||||
import org.elasticsearch.ElasticsearchGenerationException;
|
||||
import org.elasticsearch.cluster.AbstractDiffable;
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
|
@ -41,8 +42,6 @@ import static java.util.Collections.emptySet;
|
|||
|
||||
public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
||||
|
||||
public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null);
|
||||
|
||||
private final String alias;
|
||||
|
||||
private final CompressedXContent filter;
|
||||
|
@ -173,22 +172,29 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
|||
|
||||
}
|
||||
|
||||
@Override
|
||||
public AliasMetaData readFrom(StreamInput in) throws IOException {
|
||||
String alias = in.readString();
|
||||
CompressedXContent filter = null;
|
||||
public AliasMetaData(StreamInput in) throws IOException {
|
||||
alias = in.readString();
|
||||
if (in.readBoolean()) {
|
||||
filter = CompressedXContent.readCompressedString(in);
|
||||
} else {
|
||||
filter = null;
|
||||
}
|
||||
String indexRouting = null;
|
||||
if (in.readBoolean()) {
|
||||
indexRouting = in.readString();
|
||||
} else {
|
||||
indexRouting = null;
|
||||
}
|
||||
String searchRouting = null;
|
||||
if (in.readBoolean()) {
|
||||
searchRouting = in.readString();
|
||||
searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
|
||||
} else {
|
||||
searchRouting = null;
|
||||
searchRoutingValues = emptySet();
|
||||
}
|
||||
return new AliasMetaData(alias, filter, indexRouting, searchRouting);
|
||||
}
|
||||
|
||||
public static Diff<AliasMetaData> readDiffFrom(StreamInput in) throws IOException {
|
||||
return readDiffFrom(AliasMetaData::new, in);
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
|
@ -327,14 +333,6 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
|||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException {
|
||||
aliasMetaData.writeTo(out);
|
||||
}
|
||||
|
||||
public static AliasMetaData readFrom(StreamInput in) throws IOException {
|
||||
return PROTO.readFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.NamedDiff;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.ParseFieldMatcherSupplier;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
|
@ -67,10 +66,9 @@ public final class IndexGraveyard implements MetaData.Custom {
|
|||
500, // the default maximum number of tombstones
|
||||
Setting.Property.NodeScope);
|
||||
|
||||
public static final IndexGraveyard PROTO = new IndexGraveyard(new ArrayList<>());
|
||||
public static final String TYPE = "index-graveyard";
|
||||
private static final ParseField TOMBSTONES_FIELD = new ParseField("tombstones");
|
||||
private static final ObjectParser<List<Tombstone>, ParseFieldMatcherSupplier> GRAVEYARD_PARSER;
|
||||
private static final ObjectParser<List<Tombstone>, Void> GRAVEYARD_PARSER;
|
||||
static {
|
||||
GRAVEYARD_PARSER = new ObjectParser<>("index_graveyard", ArrayList::new);
|
||||
GRAVEYARD_PARSER.declareObjectArray(List::addAll, Tombstone.getParser(), TOMBSTONES_FIELD);
|
||||
|
@ -83,7 +81,7 @@ public final class IndexGraveyard implements MetaData.Custom {
|
|||
tombstones = Collections.unmodifiableList(list);
|
||||
}
|
||||
|
||||
private IndexGraveyard(final StreamInput in) throws IOException {
|
||||
public IndexGraveyard(final StreamInput in) throws IOException {
|
||||
final int queueSize = in.readVInt();
|
||||
List<Tombstone> tombstones = new ArrayList<>(queueSize);
|
||||
for (int i = 0; i < queueSize; i++) {
|
||||
|
@ -92,12 +90,8 @@ public final class IndexGraveyard implements MetaData.Custom {
|
|||
this.tombstones = Collections.unmodifiableList(tombstones);
|
||||
}
|
||||
|
||||
public static IndexGraveyard fromStream(final StreamInput in) throws IOException {
|
||||
return new IndexGraveyard(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String type() {
|
||||
public String getWriteableName() {
|
||||
return TYPE;
|
||||
}
|
||||
|
||||
|
@ -144,8 +138,8 @@ public final class IndexGraveyard implements MetaData.Custom {
|
|||
return builder.endArray();
|
||||
}
|
||||
|
||||
public IndexGraveyard fromXContent(final XContentParser parser) throws IOException {
|
||||
return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT));
|
||||
public static IndexGraveyard fromXContent(final XContentParser parser) throws IOException {
|
||||
return new IndexGraveyard(GRAVEYARD_PARSER.parse(parser, null));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -161,19 +155,13 @@ public final class IndexGraveyard implements MetaData.Custom {
}
}

@Override
public IndexGraveyard readFrom(final StreamInput in) throws IOException {
return new IndexGraveyard(in);
}

@Override
@SuppressWarnings("unchecked")
public Diff<MetaData.Custom> diff(final MetaData.Custom previous) {
return new IndexGraveyardDiff((IndexGraveyard) previous, this);
}

@Override
public Diff<MetaData.Custom> readDiffFrom(final StreamInput in) throws IOException {
public static NamedDiff<MetaData.Custom> readDiffFrom(final StreamInput in) throws IOException {
return new IndexGraveyardDiff(in);
}
@@ -273,7 +261,7 @@ public final class IndexGraveyard implements MetaData.Custom {
/**
* A class representing a diff of two IndexGraveyard objects.
*/
public static final class IndexGraveyardDiff implements Diff<MetaData.Custom> {
public static final class IndexGraveyardDiff implements NamedDiff<MetaData.Custom> {

private final List<Tombstone> added;
private final int removedCount;
@@ -349,6 +337,11 @@ public final class IndexGraveyard implements MetaData.Custom {
public int getRemovedCount() {
return removedCount;
}

@Override
public String getWriteableName() {
return TYPE;
}
}

/**
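Note: IndexGraveyardDiff now implements NamedDiff and reports the owning custom's TYPE via getWriteableName(), while deserialization moves to a static readDiffFrom(...). A minimal, hypothetical no-op diff showing that shape; the class, its name, and its behaviour are illustrative only and not taken from this change.

```java
import java.io.IOException;

import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Hypothetical no-op NamedDiff; only the method shapes mirror the change above.
public final class ExampleNoopDiff implements NamedDiff<MetaData.Custom> {

    public static final String TYPE = "example-custom";

    ExampleNoopDiff(StreamInput in) throws IOException {
        // nothing to read for a no-op diff
    }

    // A static factory replaces the old instance-level readDiffFrom override.
    public static NamedDiff<MetaData.Custom> readDiffFrom(StreamInput in) throws IOException {
        return new ExampleNoopDiff(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // nothing to write for a no-op diff
    }

    @Override
    public MetaData.Custom apply(MetaData.Custom part) {
        return part; // no-op: the previous custom is returned unchanged
    }

    @Override
    public String getWriteableName() {
        return TYPE; // the same name the owning custom is registered under
    }
}
```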
@@ -359,16 +352,17 @@ public final class IndexGraveyard implements MetaData.Custom {
private static final String INDEX_KEY = "index";
private static final String DELETE_DATE_IN_MILLIS_KEY = "delete_date_in_millis";
private static final String DELETE_DATE_KEY = "delete_date";
private static final ObjectParser<Tombstone.Builder, ParseFieldMatcherSupplier> TOMBSTONE_PARSER;
private static final ObjectParser<Tombstone.Builder, Void> TOMBSTONE_PARSER;
static {
TOMBSTONE_PARSER = new ObjectParser<>("tombstoneEntry", Tombstone.Builder::new);
TOMBSTONE_PARSER.declareObject(Tombstone.Builder::index, Index::parseIndex, new ParseField(INDEX_KEY));
TOMBSTONE_PARSER.declareObject(Tombstone.Builder::index, (parser, context) -> Index.fromXContent(parser),
new ParseField(INDEX_KEY));
TOMBSTONE_PARSER.declareLong(Tombstone.Builder::deleteDateInMillis, new ParseField(DELETE_DATE_IN_MILLIS_KEY));
TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY));
}

static ContextParser<ParseFieldMatcherSupplier, Tombstone> getParser() {
return (p, c) -> TOMBSTONE_PARSER.apply(p, c).build();
static ContextParser<Void, Tombstone> getParser() {
return (parser, context) -> TOMBSTONE_PARSER.apply(parser, null).build();
}

private final Index index;
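Note: the tombstone parser switches its context type from ParseFieldMatcherSupplier to Void, so callers simply pass null. A small, self-contained sketch of that pattern; the ExampleEntry class, its fields, and its field names are hypothetical, while the ObjectParser calls mirror the ones used above.

```java
import java.io.IOException;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

// Hypothetical value class; only the parser wiring mirrors the change above.
class ExampleEntry {
    String index;
    long deleteDateInMillis;

    // The context type parameter is Void: no ParseFieldMatcherSupplier is needed.
    private static final ObjectParser<ExampleEntry, Void> PARSER =
            new ObjectParser<>("example_entry", ExampleEntry::new);

    static {
        PARSER.declareString((entry, value) -> entry.index = value, new ParseField("index"));
        PARSER.declareLong((entry, value) -> entry.deleteDateInMillis = value,
                new ParseField("delete_date_in_millis"));
    }

    // With a Void context there is nothing to supply, so null is passed through.
    static ExampleEntry fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }
}
```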
@@ -443,7 +437,7 @@ public final class IndexGraveyard implements MetaData.Custom {
}

public static Tombstone fromXContent(final XContentParser parser) throws IOException {
return TOMBSTONE_PARSER.parse(parser, () -> ParseFieldMatcher.STRICT).build();
return TOMBSTONE_PARSER.parse(parser, null).build();
}

/**
@@ -23,7 +23,6 @@ import com.carrotsearch.hppc.LongArrayList;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.Diff;
@@ -34,7 +33,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.cluster.routing.allocation.IndexMetaDataUpdater;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -46,7 +44,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -78,8 +75,12 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;

public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {
public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {

/**
* This class will be removed in v7.0
*/
@Deprecated
public interface Custom extends Diffable<Custom>, ToXContent {

String type();
@@ -88,6 +89,16 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

Custom fromXContent(XContentParser parser) throws IOException;

/**
* Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput
*/
Diff<Custom> readDiffFrom(StreamInput in) throws IOException;

/**
* Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
*/
Custom readFrom(StreamInput in) throws IOException;

/**
* Merges from this to another, with this being more important, i.e., if something exists in this and another,
* this will prevail.
@@ -249,10 +260,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
Setting.Property.Dynamic,
Setting.Property.IndexScope);

public static final IndexMetaData PROTO = IndexMetaData.builder("")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();

public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations";
static final String KEY_VERSION = "version";
static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards";
@@ -567,13 +574,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return new IndexMetaDataDiff(previousState, this);
}

@Override
public Diff<IndexMetaData> readDiffFrom(StreamInput in) throws IOException {
public static Diff<IndexMetaData> readDiffFrom(StreamInput in) throws IOException {
return new IndexMetaDataDiff(in);
}

@Override
public IndexMetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
public static IndexMetaData fromXContent(XContentParser parser) throws IOException {
return Builder.fromXContent(parser);
}
@@ -617,8 +622,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
state = State.fromId(in.readByte());
settings = Settings.readSettingsFromStream(in);
primaryTerms = in.readVLongArray();
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData::new,
MappingMetaData::readDiffFrom);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new,
AliasMetaData::readDiffFrom);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
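Note: the mapping and alias map diffs above are now read with a reader/diff-reader pair of method references instead of PROTO instances. A hypothetical helper mirroring the aliases call, assuming the four-argument readImmutableOpenMapDiff overload used above; the class and method names here are illustrative.

```java
import java.io.IOException;

import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;

// Hypothetical helper; mirrors the four-argument call shown in the diff above.
final class AliasDiffReader {
    static Diff<ImmutableOpenMap<String, AliasMetaData>> readAliasesDiff(StreamInput in) throws IOException {
        return DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
                AliasMetaData::new,           // reads a full AliasMetaData from the stream
                AliasMetaData::readDiffFrom); // reads a diff for a single alias
    }
}
```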
@@ -626,6 +633,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return lookupPrototypeSafe(key).readFrom(in);
}

@SuppressWarnings("unchecked")
@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
@@ -665,8 +673,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}

@Override
public IndexMetaData readFrom(StreamInput in) throws IOException {
public static IndexMetaData readFrom(StreamInput in) throws IOException {
Builder builder = new Builder(in.readString());
builder.version(in.readLong());
builder.setRoutingNumShards(in.readInt());
@@ -675,12 +682,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.primaryTerms(in.readVLongArray());
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in);
MappingMetaData mappingMd = new MappingMetaData(in);
builder.putMapping(mappingMd);
}
int aliasesSize = in.readVInt();
for (int i = 0; i < aliasesSize; i++) {
AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
AliasMetaData aliasMd = new AliasMetaData(in);
builder.putAlias(aliasMd);
}
int customSize = in.readVInt();
@@ -1200,10 +1207,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
return builder.build();
}

public static IndexMetaData readFrom(StreamInput in) throws IOException {
return PROTO.readFrom(in);
}
}

/**
Some files were not shown because too many files have changed in this diff.