Merge branch 'master' into pr/update-aws-sdk
This commit is contained in:
commit 63223928dc

@@ -7,7 +7,7 @@ attention.
-->

- Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?
-- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/.github/CONTRIBUTING.md)?
+- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md)?
- If submitting code, have you built your formula locally prior to submission with `gradle check`?
- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
- If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)?

@@ -160,6 +160,7 @@ subprojects {
them as external dependencies so the build plugin that we use can be used
to build elasticsearch plugins outside of the elasticsearch source tree. */
ext.projectSubstitutions = [
"org.elasticsearch.gradle:build-tools:${version}": ':build-tools',
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':core',
"org.elasticsearch.test:framework:${version}": ':test:framework',
@@ -257,7 +258,6 @@ allprojects {

idea {
project {
languageLevel = org.elasticsearch.gradle.BuildPlugin.minimumJava.toString()
vcs = 'Git'
}
}
@@ -304,9 +304,10 @@ allprojects {
into '.settings'
}
// otherwise .settings is not nuked entirely
-tasks.cleanEclipse {
+task wipeEclipseSettings(type: Delete) {
delete '.settings'
}
+tasks.cleanEclipse.dependsOn(wipeEclipseSettings)
// otherwise the eclipse merging is *super confusing*
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
}

@@ -103,6 +103,7 @@ if (project == rootProject) {
url "https://oss.sonatype.org/content/repositories/snapshots/"
}
}
+test.exclude 'org/elasticsearch/test/NamingConventionsCheckBadClasses*'
}

/*****************************************************************************
@@ -122,8 +123,8 @@ if (project != rootProject) {
// build-tools is not ready for primetime with these...
dependencyLicenses.enabled = false
forbiddenApisMain.enabled = false
forbiddenApisTest.enabled = false
jarHell.enabled = false
loggerUsageCheck.enabled = false
thirdPartyAudit.enabled = false

// test for elasticsearch.build tries to run with ES...
@@ -137,4 +138,9 @@ if (project != rootProject) {
// the file that actually defines nocommit
exclude '**/ForbiddenPatternsTask.groovy'
}
+
+namingConventions {
+testClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$UnitTestCase'
+integTestClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$IntegTestCase'
+}
}

@@ -113,7 +113,7 @@ class BuildPlugin implements Plugin<Project> {
}

// enforce gradle version
-GradleVersion minGradle = GradleVersion.version('2.8')
+GradleVersion minGradle = GradleVersion.version('2.13')
if (GradleVersion.current() < minGradle) {
throw new GradleException("${minGradle} or above is required to build elasticsearch")
}
@@ -372,6 +372,13 @@ class BuildPlugin implements Plugin<Project> {
options.fork = true
options.forkOptions.executable = new File(project.javaHome, 'bin/javac')
options.forkOptions.memoryMaximumSize = "1g"
+if (project.targetCompatibility >= JavaVersion.VERSION_1_8) {
+// compile with compact 3 profile by default
+// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
+if (project.compactProfile != 'full') {
+options.compilerArgs << '-profile' << project.compactProfile
+}
+}
/*
* -path because gradle will send in paths that don't always exist.
* -missing because we have tons of missing @returns and @param.
@@ -379,11 +386,6 @@ class BuildPlugin implements Plugin<Project> {
*/
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
-// compile with compact 3 profile by default
-// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
-if (project.compactProfile != 'full') {
-options.compilerArgs << '-profile' << project.compactProfile
-}
options.encoding = 'UTF-8'
//options.incremental = true

@@ -170,6 +170,13 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println('---')
current.println("setup:")
body(setup)
+// always wait for yellow before anything is executed
+current.println(
+" - do:\n" +
+" raw:\n" +
+" method: GET\n" +
+" path: \"_cluster/health\"\n" +
+" wait_for_status: \"yellow\"")
}

private void body(Snippet snippet) {

@@ -21,6 +21,7 @@ package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.artifacts.Dependency
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
@@ -57,8 +58,27 @@ public class NamingConventionsTask extends LoggedExec {
@Input
boolean skipIntegTestInDisguise = false

/**
* Superclass for all tests.
*/
@Input
String testClass = 'org.apache.lucene.util.LuceneTestCase'

/**
* Superclass for all integration tests.
*/
@Input
String integTestClass = 'org.elasticsearch.test.ESIntegTestCase'

public NamingConventionsTask() {
dependsOn(classpath)
// Extra classpath contains the actual test
project.configurations.create('namingConventions')
Dependency buildToolsDep = project.dependencies.add('namingConventions',
"org.elasticsearch.gradle:build-tools:${VersionProperties.elasticsearch}")
buildToolsDep.transitive = false // We don't need gradle in the classpath. It conflicts.
FileCollection extraClasspath = project.configurations.namingConventions
dependsOn(extraClasspath)

description = "Runs NamingConventionsCheck on ${classpath}"
executable = new File(project.javaHome, 'bin/java')
onlyIf { project.sourceSets.test.output.classesDir.exists() }
@@ -69,7 +89,8 @@ public class NamingConventionsTask extends LoggedExec {
project.afterEvaluate {
doFirst {
args('-Djna.nosys=true')
-args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
+args('-cp', (classpath + extraClasspath).asPath, 'org.elasticsearch.test.NamingConventionsCheck')
+args(testClass, integTestClass)
if (skipIntegTestInDisguise) {
args('--skip-integ-tests-in-disguise')
}
@@ -79,7 +100,7 @@ public class NamingConventionsTask extends LoggedExec {
* process of ignoring them lets us validate that they were found so this ignore parameter acts
* as the test for the NamingConventionsCheck.
*/
-if (':test:framework'.equals(project.path)) {
+if (':build-tools'.equals(project.path)) {
args('--self-test')
}
args('--', project.sourceSets.test.output.classesDir.absolutePath)

@@ -34,7 +34,6 @@ class PrecommitTasks {
configureForbiddenApis(project),
configureCheckstyle(project),
configureNamingConventions(project),
-configureLoggerUsage(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('jarHell', JarHellTask.class),
@@ -49,6 +48,20 @@ class PrecommitTasks {
UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
updateShas.parentTask = dependencyLicenses
}
+if (project.path != ':build-tools') {
+/*
+* Sadly, build-tools can't have logger-usage-check because that
+* would create a circular project dependency between build-tools
+* (which provides NamingConventionsCheck) and :test:logger-usage
+* which provides the logger usage check. Since the build tools
+* don't use the logger usage check because they don't have any
+* of Elaticsearch's loggers and :test:logger-usage actually does
+* use the NamingConventionsCheck we break the circular dependency
+* here.
+*/
+precommitTasks.add(configureLoggerUsage(project))
+}
+

Map<String, Object> precommitOptions = [
name: 'precommit',

@@ -291,9 +291,10 @@ class ClusterFormationTasks {
File configDir = new File(node.homeDir, 'config')
copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
+Object extraConfigFileValue = extraConfigFile.getValue()
copyConfig.doFirst {
// make sure the copy won't be a no-op or act on a directory
-File srcConfigFile = project.file(extraConfigFile.getValue())
+File srcConfigFile = project.file(extraConfigFileValue)
if (srcConfigFile.isDirectory()) {
throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
}
@@ -303,7 +304,7 @@ class ClusterFormationTasks {
}
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
// wrap source file in closure to delay resolution to execution time
-copyConfig.from({ extraConfigFile.getValue() }) {
+copyConfig.from({ extraConfigFileValue }) {
// this must be in a closure so it is only applied to the single file specified in from above
into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
rename { destConfigFile.name }

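The two hunks above fix a deferred-execution bug: the doFirst block and the from closure run long after the loop has moved on, so they must capture a snapshot of the entry's value instead of reading the live Map.Entry later. A minimal Java sketch of the same hazard and fix (names are illustrative, not from the build):

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class DeferredCapture {
        public static void main(String[] args) {
            Map<String, String> extraFiles = new LinkedHashMap<>();
            extraFiles.put("a.yml", "src/a.yml");
            extraFiles.put("b.yml", "src/b.yml");

            List<Runnable> deferred = new ArrayList<>();
            for (Map.Entry<String, String> entry : extraFiles.entrySet()) {
                // Capture the value now: entrySet() entries are only guaranteed
                // valid while the iteration is at this element.
                String value = entry.getValue();
                deferred.add(() -> System.out.println(value));
            }
            deferred.forEach(Runnable::run); // prints src/a.yml then src/b.yml
        }
    }
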
@@ -25,14 +25,11 @@ import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashSet;
import java.util.Set;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.io.PathUtils;

/**
* Checks that all tests in a directory are named according to our naming conventions. This is important because tests that do not follow
* our conventions aren't run by gradle. This was once a glorious unit test but now that Elasticsearch is a multi-module project it must be
@@ -46,11 +43,13 @@ import org.elasticsearch.common.io.PathUtils;
* {@code --self-test} that is only run in the test:framework project.
*/
public class NamingConventionsCheck {
-public static void main(String[] args) throws IOException, ClassNotFoundException {
-NamingConventionsCheck check = new NamingConventionsCheck();
+public static void main(String[] args) throws IOException {
+int i = 0;
+NamingConventionsCheck check = new NamingConventionsCheck(
+loadClassWithoutInitializing(args[i++]),
+loadClassWithoutInitializing(args[i++]));
boolean skipIntegTestsInDisguise = false;
boolean selfTest = false;
-int i = 0;
while (true) {
switch (args[i]) {
case "--skip-integ-tests-in-disguise":
@@ -69,7 +68,7 @@ public class NamingConventionsCheck {
}
break;
}
-check.check(PathUtils.get(args[i]));
+check.check(Paths.get(args[i]));

if (selfTest) {
assertViolation("WrongName", check.missingSuffix);
@@ -82,14 +81,12 @@ public class NamingConventionsCheck {
}

// Now we should have no violations
-assertNoViolations("Not all subclasses of " + ESTestCase.class.getSimpleName()
+assertNoViolations("Not all subclasses of " + check.testClass.getSimpleName()
+ " match the naming convention. Concrete classes must end with [Tests]", check.missingSuffix);
assertNoViolations("Classes ending with [Tests] are abstract or interfaces", check.notRunnable);
assertNoViolations("Found inner classes that are tests, which are excluded from the test runner", check.innerClasses);
-String classesToSubclass = String.join(",", ESTestCase.class.getSimpleName(), ESTestCase.class.getSimpleName(),
-ESTokenStreamTestCase.class.getSimpleName(), LuceneTestCase.class.getSimpleName());
-assertNoViolations("Pure Unit-Test found must subclass one of [" + classesToSubclass + "]", check.pureUnitTest);
-assertNoViolations("Classes ending with [Tests] must subclass [" + classesToSubclass + "]", check.notImplementing);
+assertNoViolations("Pure Unit-Test found must subclass [" + check.testClass.getSimpleName() + "]", check.pureUnitTest);
+assertNoViolations("Classes ending with [Tests] must subclass [" + check.testClass.getSimpleName() + "]", check.notImplementing);
if (!skipIntegTestsInDisguise) {
assertNoViolations("Subclasses of ESIntegTestCase should end with IT as they are integration tests",
check.integTestsInDisguise);
@@ -103,6 +100,14 @@ public class NamingConventionsCheck {
private final Set<Class<?>> notRunnable = new HashSet<>();
private final Set<Class<?>> innerClasses = new HashSet<>();

+private final Class<?> testClass;
+private final Class<?> integTestClass;
+
+public NamingConventionsCheck(Class<?> testClass, Class<?> integTestClass) {
+this.testClass = testClass;
+this.integTestClass = integTestClass;
+}
+
public void check(Path rootPath) throws IOException {
Files.walkFileTree(rootPath, new FileVisitor<Path>() {
/**
@@ -136,9 +141,9 @@ public class NamingConventionsCheck {
String filename = file.getFileName().toString();
if (filename.endsWith(".class")) {
String className = filename.substring(0, filename.length() - ".class".length());
-Class<?> clazz = loadClass(className);
+Class<?> clazz = loadClassWithoutInitializing(packageName + className);
if (clazz.getName().endsWith("Tests")) {
-if (ESIntegTestCase.class.isAssignableFrom(clazz)) {
+if (integTestClass.isAssignableFrom(clazz)) {
integTestsInDisguise.add(clazz);
}
if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
@@ -164,15 +169,7 @@ public class NamingConventionsCheck {
}

private boolean isTestCase(Class<?> clazz) {
-return LuceneTestCase.class.isAssignableFrom(clazz);
-}
-
-private Class<?> loadClass(String className) {
-try {
-return Thread.currentThread().getContextClassLoader().loadClass(packageName + className);
-} catch (ClassNotFoundException e) {
-throw new RuntimeException(e);
-}
+return testClass.isAssignableFrom(clazz);
}

@Override
@@ -186,7 +183,6 @@ public class NamingConventionsCheck {
* Fail the process if there are any violations in the set. Named to look like a junit assertion even though it isn't because it is
* similar enough.
*/
-@SuppressForbidden(reason = "System.err/System.exit")
private static void assertNoViolations(String message, Set<Class<?>> set) {
if (false == set.isEmpty()) {
System.err.println(message + ":");
@@ -201,10 +197,9 @@ public class NamingConventionsCheck {
* Fail the process if we didn't detect a particular violation. Named to look like a junit assertion even though it isn't because it is
* similar enough.
*/
-@SuppressForbidden(reason = "System.err/System.exit")
-private static void assertViolation(String className, Set<Class<?>> set) throws ClassNotFoundException {
-className = "org.elasticsearch.test.test.NamingConventionsCheckBadClasses$" + className;
-if (false == set.remove(Class.forName(className))) {
+private static void assertViolation(String className, Set<Class<?>> set) {
+className = "org.elasticsearch.test.NamingConventionsCheckBadClasses$" + className;
+if (false == set.remove(loadClassWithoutInitializing(className))) {
System.err.println("Error in NamingConventionsCheck! Expected [" + className + "] to be a violation but wasn't.");
System.exit(1);
}
@@ -213,9 +208,20 @@ public class NamingConventionsCheck {
/**
* Fail the process with the provided message.
*/
-@SuppressForbidden(reason = "System.err/System.exit")
private static void fail(String reason) {
System.err.println(reason);
System.exit(1);
}
+
+static Class<?> loadClassWithoutInitializing(String name) {
+try {
+return Class.forName(name,
+// Don't initialize the class to save time. Not needed for this test and this doesn't share a VM with any other tests.
+false,
+// Use our classloader rather than the bootstrap class loader.
+NamingConventionsCheck.class.getClassLoader());
+} catch (ClassNotFoundException e) {
+throw new RuntimeException(e);
+}
+}
}

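The initialize=false argument in the method added above is what lets the checker reflect over every test class without running static initializers, which may assume a fully booted test VM. A standalone sketch of the behavior (class names hypothetical):

    public class LoadWithoutInit {
        static class Heavy {
            static { System.out.println("static init ran"); }
        }

        public static void main(String[] args) throws ClassNotFoundException {
            // initialize=false: the class is loaded, but its static
            // initializer will not run until the class is actually used.
            Class<?> c = Class.forName("LoadWithoutInit$Heavy", false,
                    LoadWithoutInit.class.getClassLoader());
            System.out.println(c.getName());       // Heavy's initializer has not run
            System.out.println(c.getSuperclass()); // reflection still works
        }
    }
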
@@ -19,7 +19,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]Action.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ActionModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ActionRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ReplicationResponse.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
@@ -101,7 +100,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]open[/\\]TransportOpenIndexAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]recovery[/\\]TransportRecoveryAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]refresh[/\\]TransportRefreshAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]refresh[/\\]TransportShardRefreshAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]segments[/\\]IndexSegments.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]segments[/\\]IndicesSegmentResponse.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]segments[/\\]IndicesSegmentsRequestBuilder.java" checks="LineLength" />
@@ -484,7 +482,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltCacheFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltTokenFilters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]breaker[/\\]HierarchyCircuitBreakerService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cluster[/\\]IndicesClusterStateService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCacheListener.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]flush[/\\]ShardsSyncedFlushResult.java" checks="LineLength" />
@@ -503,7 +500,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]CompoundProcessor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]IngestDocument.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]Pipeline.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]ConvertProcessor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
@@ -961,7 +957,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]simple[/\\]SimpleMapperTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]source[/\\]DefaultSourceMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]string[/\\]SimpleStringMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]string[/\\]StringFieldMapperPositionIncrementGapTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]timestamp[/\\]TimestampMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ttl[/\\]TTLMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]typelevels[/\\]ParseDocumentTypeLevelsTests.java" checks="LineLength" />
@@ -1042,14 +1037,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]CompoundProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]PipelineFactoryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]ValueSourceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]AbstractStringProcessorTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]AppendProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]DateFormatTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]DateProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]GsubProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]RenameProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]SetProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]SplitProcessorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mget[/\\]SimpleMgetIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
@@ -1161,9 +1148,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]update[/\\]UpdateIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]validate[/\\]SimpleValidateQueryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]versioning[/\\]SimpleVersioningIT.java" checks="LineLength" />
<suppress files="modules[/\\]ingest-grok[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]grok[/\\]Grok.java" checks="LineLength" />
<suppress files="modules[/\\]ingest-grok[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]grok[/\\]GrokProcessorTests.java" checks="LineLength" />
<suppress files="modules[/\\]ingest-grok[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]grok[/\\]GrokTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionPlugin.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionScriptEngineService.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionSearchScript.java" checks="LineLength" />
@@ -1197,7 +1181,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustachePlugin.java" checks="LineLength" />
<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]RenderSearchTemplateTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SuggestSearchTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TemplateQueryParserTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TemplateQueryTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
@@ -1279,14 +1262,6 @@
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]blobstore[/\\]MockDefaultS3OutputStream.java" checks="LineLength" />
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]s3[/\\]AbstractS3SnapshotRestoreTest.java" checks="LineLength" />
<suppress files="plugins[/\\]store-smb[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]store[/\\]SmbDirectoryWrapper.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]ESPolicyUnitTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]EvilSecurityTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommandTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeUnitTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-client[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]smoketest[/\\]ESSmokeClientTestCase.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]CombineProcessorsTests.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestDocumentMustacheIT.java" checks="LineLength" />
<suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestMustacheSetProcessorIT.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]BootstrapForTesting.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]MockInternalClusterInfoService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]TestShardRouting.java" checks="LineLength" />

@@ -31,5 +31,3 @@ org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()

@defaultMessage Soon to be removed
org.apache.lucene.document.FieldType#numericType()
-
-org.apache.lucene.document.InetAddressPoint#newPrefixQuery(java.lang.String, java.net.InetAddress, int) @LUCENE-7232

@@ -92,3 +92,8 @@ org.joda.time.DateTime#<init>(int, int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
org.joda.time.DateTime#now()
org.joda.time.DateTimeZone#getDefault()
+
+@defaultMessage Don't use MethodHandles in slow ways, except in tests.
+java.lang.invoke.MethodHandle#invoke(java.lang.Object[])
+java.lang.invoke.MethodHandle#invokeWithArguments(java.lang.Object[])
+java.lang.invoke.MethodHandle#invokeWithArguments(java.util.List)

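The three signatures banned above are the reflective, boxing entry points of MethodHandle; invokeExact stays allowed because it compiles to a direct call. A sketch of the difference being policed (plain JDK, nothing Elasticsearch-specific assumed):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class HandleSpeed {
        public static void main(String[] args) throws Throwable {
            MethodHandle max = MethodHandles.lookup().findStatic(
                    Math.class, "max", MethodType.methodType(int.class, int.class, int.class));
            // Exact invocation: signature is checked at the call site, no boxing.
            int fast = (int) max.invokeExact(1, 2);
            // invokeWithArguments: boxes each int and spreads an Object[] --
            // the "slow way" the forbidden-APIs rule exists to catch.
            int slow = (int) max.invokeWithArguments(1, 2);
            System.out.println(fast + " " + slow);
        }
    }
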
@@ -21,5 +21,7 @@ com.carrotsearch.randomizedtesting.annotations.Repeat @ Don't commit hardcoded r
org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead
org.apache.lucene.util.LuceneTestCase$Slow @ Don't write slow tests
org.junit.Ignore @ Use AwaitsFix instead
+org.apache.lucene.util.LuceneTestCase$Nightly @ We don't run nightly tests at this point!
+com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly tests at this point!

org.junit.Test @defaultMessage Just name your test method testFooBar

@@ -17,9 +17,7 @@
* under the License.
*/

-package org.elasticsearch.test.test;

-import org.elasticsearch.test.ESTestCase;
+package org.elasticsearch.test;

+import junit.framework.TestCase;

@@ -30,21 +28,35 @@ public class NamingConventionsCheckBadClasses {
public static final class NotImplementingTests {
}

-public static final class WrongName extends ESTestCase {
+public static final class WrongName extends UnitTestCase {
+/*
+* Dummy test so the tests pass. We do this *and* skip the tests so anyone who jumps back to a branch without these tests can still
+* compile without a failure. That is because clean doesn't actually clean these....
+*/
+public void testDummy() {}
}

-public static abstract class DummyAbstractTests extends ESTestCase {
+public static abstract class DummyAbstractTests extends UnitTestCase {
}

public interface DummyInterfaceTests {
}

-public static final class InnerTests extends ESTestCase {
+public static final class InnerTests extends UnitTestCase {
public void testDummy() {}
}

-public static final class WrongNameTheSecond extends ESTestCase {
+public static final class WrongNameTheSecond extends UnitTestCase {
public void testDummy() {}
}

public static final class PlainUnit extends TestCase {
public void testDummy() {}
}

+public abstract static class UnitTestCase extends TestCase {
+}
+
+public abstract static class IntegTestCase extends UnitTestCase {
+}
}

@@ -1,5 +1,5 @@
-elasticsearch = 5.0.0-alpha3
-lucene = 6.0.0
+elasticsearch = 5.0.0-alpha4
+lucene = 6.1.0-snapshot-3a57bea

# optional dependencies
spatial4j = 0.6

@@ -63,7 +63,7 @@ dependencies {
compile 'com.carrotsearch:hppc:0.7.1'

// time handling, remove with java 8 time
-compile 'joda-time:joda-time:2.8.2'
+compile 'joda-time:joda-time:2.9.4'
// joda 2.0 moved to using volatile fields for datetime
// When updating to a new version, make sure to update our copy of BaseDateTime
compile 'org.joda:joda-convert:1.2'

@@ -1,117 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.document;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.SuppressForbidden;

/**
* Forked utility methods from Lucene's InetAddressPoint until LUCENE-7232 and
* LUCENE-7234 are released.
*/
// TODO: remove me when we upgrade to Lucene 6.1
@SuppressForbidden(reason="uses InetAddress.getHostAddress")
public final class XInetAddressPoint {

private XInetAddressPoint() {}

/** The minimum value that an ip address can hold. */
public static final InetAddress MIN_VALUE;
/** The maximum value that an ip address can hold. */
public static final InetAddress MAX_VALUE;
static {
MIN_VALUE = InetAddressPoint.decode(new byte[InetAddressPoint.BYTES]);
byte[] maxValueBytes = new byte[InetAddressPoint.BYTES];
Arrays.fill(maxValueBytes, (byte) 0xFF);
MAX_VALUE = InetAddressPoint.decode(maxValueBytes);
}

/**
* Return the {@link InetAddress} that compares immediately greater than
* {@code address}.
* @throws ArithmeticException if the provided address is the
* {@link #MAX_VALUE maximum ip address}
*/
public static InetAddress nextUp(InetAddress address) {
if (address.equals(MAX_VALUE)) {
throw new ArithmeticException("Overflow: there is no greater InetAddress than "
+ address.getHostAddress());
}
byte[] delta = new byte[InetAddressPoint.BYTES];
delta[InetAddressPoint.BYTES-1] = 1;
byte[] nextUpBytes = new byte[InetAddressPoint.BYTES];
NumericUtils.add(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextUpBytes);
return InetAddressPoint.decode(nextUpBytes);
}

/**
* Return the {@link InetAddress} that compares immediately less than
* {@code address}.
* @throws ArithmeticException if the provided address is the
* {@link #MIN_VALUE minimum ip address}
*/
public static InetAddress nextDown(InetAddress address) {
if (address.equals(MIN_VALUE)) {
throw new ArithmeticException("Underflow: there is no smaller InetAddress than "
+ address.getHostAddress());
}
byte[] delta = new byte[InetAddressPoint.BYTES];
delta[InetAddressPoint.BYTES-1] = 1;
byte[] nextDownBytes = new byte[InetAddressPoint.BYTES];
NumericUtils.subtract(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextDownBytes);
return InetAddressPoint.decode(nextDownBytes);
}

/**
* Create a prefix query for matching a CIDR network range.
*
* @param field field name. must not be {@code null}.
* @param value any host address
* @param prefixLength the network prefix length for this address. This is also known as the subnet mask in the context of IPv4
* addresses.
* @throws IllegalArgumentException if {@code field} is null, or prefixLength is invalid.
* @return a query matching documents with addresses contained within this network
*/
// TODO: remove me when we upgrade to Lucene 6.0.1
public static Query newPrefixQuery(String field, InetAddress value, int prefixLength) {
if (value == null) {
throw new IllegalArgumentException("InetAddress must not be null");
}
if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) {
throw new IllegalArgumentException("illegal prefixLength '" + prefixLength
+ "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges");
}
// create the lower value by zeroing out the host portion, upper value by filling it with all ones.
byte lower[] = value.getAddress();
byte upper[] = value.getAddress();
for (int i = prefixLength; i < 8 * lower.length; i++) {
int m = 1 << (7 - (i & 7));
lower[i >> 3] &= ~m;
upper[i >> 3] |= m;
}
try {
return InetAddressPoint.newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper));
} catch (UnknownHostException e) {
throw new AssertionError(e); // values are coming from InetAddress
}
}
}

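The deletion above honors the file's own "remove me when we upgrade to Lucene 6.1" TODO: the commit's Lucene bump to 6.1.0-snapshot-3a57bea makes InetAddressPoint's own prefix query available. The prefix-mask loop deserves a worked example: for a /24 prefix, bits 24..31 (the host portion) are cleared in the lower bound and set in the upper bound. A self-contained sketch of just that math, assuming nothing beyond the JDK:

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class PrefixBounds {
        public static void main(String[] args) throws UnknownHostException {
            byte[] lower = InetAddress.getByName("192.168.1.77").getAddress();
            byte[] upper = lower.clone();
            int prefixLength = 24;
            for (int i = prefixLength; i < 8 * lower.length; i++) {
                int m = 1 << (7 - (i & 7)); // mask for bit i within its byte
                lower[i >> 3] &= ~m;        // clear host bit -> 192.168.1.0
                upper[i >> 3] |= m;         // set host bit   -> 192.168.1.255
            }
            System.out.println(InetAddress.getByAddress(lower).getHostAddress());
            System.out.println(InetAddress.getByAddress(upper).getHostAddress());
        }
    }
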
@@ -283,7 +283,7 @@ public abstract class BlendedTermQuery extends Query {
@Override
public boolean equals(Object o) {
if (this == o) return true;
-if (!super.equals(o)) return false;
+if (sameClassAs(o) == false) return false;

BlendedTermQuery that = (BlendedTermQuery) o;
return Arrays.equals(equalsTerms(), that.equalsTerms());
@@ -291,7 +291,7 @@ public abstract class BlendedTermQuery extends Query {

@Override
public int hashCode() {
-return Objects.hash(super.hashCode(), Arrays.hashCode(equalsTerms()));
+return Objects.hash(classHash(), Arrays.hashCode(equalsTerms()));
}

public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) {

@@ -44,12 +44,12 @@ public final class MinDocQuery extends Query {

@Override
public int hashCode() {
-return Objects.hash(super.hashCode(), minDoc);
+return Objects.hash(classHash(), minDoc);
}

@Override
public boolean equals(Object obj) {
-if (super.equals(obj) == false) {
+if (sameClassAs(obj) == false) {
return false;
}
MinDocQuery that = (MinDocQuery) obj;

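Both query classes above are migrating to the equality helpers Lucene 6 added on Query: sameClassAs(o) replaces super.equals(o), and classHash() replaces super.hashCode() as the hash seed. A hedged sketch of the pattern for a made-up query class (ConstantQuery is illustrative, not from the codebase):

    import java.util.Objects;
    import org.apache.lucene.search.Query;

    public final class ConstantQuery extends Query {
        private final int marker;

        public ConstantQuery(int marker) {
            this.marker = marker;
        }

        @Override
        public String toString(String field) {
            return "ConstantQuery(" + marker + ")";
        }

        @Override
        public boolean equals(Object other) {
            // sameClassAs handles null and requires the exact same class,
            // so the cast below is safe.
            return sameClassAs(other) && marker == ((ConstantQuery) other).marker;
        }

        @Override
        public int hashCode() {
            return Objects.hash(classHash(), marker);
        }
    }
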
@@ -63,9 +63,6 @@ import org.elasticsearch.common.io.PathUtils;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -622,8 +619,12 @@ public long ramBytesUsed() {
Set<BytesRef> seenSurfaceForms = new HashSet<>();

int dedup = 0;
-while (reader.read(scratch)) {
-input.reset(scratch.bytes(), 0, scratch.length());
+while (true) {
+BytesRef bytes = reader.next();
+if (bytes == null) {
+break;
+}
+input.reset(bytes.bytes, bytes.offset, bytes.length);
short analyzedLength = input.readShort();
analyzed.grow(analyzedLength+2);
input.readBytes(analyzed.bytes(), 0, analyzedLength);
@@ -631,13 +632,13 @@ public long ramBytesUsed() {

long cost = input.readInt();

-surface.bytes = scratch.bytes();
+surface.bytes = bytes.bytes;
if (hasPayloads) {
surface.length = input.readShort();
surface.offset = input.getPosition();
} else {
surface.offset = input.getPosition();
-surface.length = scratch.length() - surface.offset;
+surface.length = bytes.length - surface.offset;
}

if (previousAnalyzed == null) {
@@ -679,11 +680,11 @@ public long ramBytesUsed() {
builder.add(scratchInts.get(), outputs.newPair(cost, BytesRef.deepCopyOf(surface)));
} else {
int payloadOffset = input.getPosition() + surface.length;
-int payloadLength = scratch.length() - payloadOffset;
+int payloadLength = bytes.length - payloadOffset;
BytesRef br = new BytesRef(surface.length + 1 + payloadLength);
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
br.bytes[surface.length] = (byte) payloadSep;
-System.arraycopy(scratch.bytes(), payloadOffset, br.bytes, surface.length+1, payloadLength);
+System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength);
br.length = br.bytes.length;
builder.add(scratchInts.get(), outputs.newPair(cost, br));
}

@@ -76,7 +76,9 @@ public class Version {
public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha3_ID = 5000003;
public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-public static final Version CURRENT = V_5_0_0_alpha3;
+public static final int V_5_0_0_alpha4_ID = 5000004;
+public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
+public static final Version CURRENT = V_5_0_0_alpha4;

static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -89,6 +91,8 @@ public class Version {

public static Version fromId(int id) {
switch (id) {
+case V_5_0_0_alpha4_ID:
+return V_5_0_0_alpha4;
case V_5_0_0_alpha3_ID:
return V_5_0_0_alpha3;
case V_5_0_0_alpha2_ID:

@@ -32,6 +32,8 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
@@ -115,6 +117,8 @@ import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettin
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
+import org.elasticsearch.action.admin.indices.shrink.ShrinkAction;
+import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
@@ -139,7 +143,7 @@ import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.explain.ExplainAction;
import org.elasticsearch.action.explain.TransportExplainAction;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
-import org.elasticsearch.action.fieldstats.TransportFieldStatsTransportAction;
+import org.elasticsearch.action.fieldstats.TransportFieldStatsAction;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.MultiGetAction;
import org.elasticsearch.action.get.TransportGetAction;
@@ -262,6 +266,7 @@ public class ActionModule extends AbstractModule {
registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
+registerAction(GetTaskAction.INSTANCE, TransportGetTaskAction.class);
registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
@@ -286,6 +291,7 @@ public class ActionModule extends AbstractModule {
registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
registerAction(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class);
registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
+registerAction(ShrinkAction.INSTANCE, TransportShrinkAction.class);
registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
registerAction(GetIndexAction.INSTANCE, TransportGetIndexAction.class);
registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
@@ -338,7 +344,7 @@ public class ActionModule extends AbstractModule {
registerAction(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
registerAction(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);

-registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class);
+registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class);

registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);

@@ -39,6 +39,9 @@ public abstract class ActionRequest<Request extends ActionRequest<Request>> exte

public abstract ActionRequestValidationException validate();

+/**
+* Should this task persist its result after it has finished?
+*/
public boolean getShouldPersistResult() {
return false;
}

@@ -18,10 +18,15 @@
*/
package org.elasticsearch.action;

+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.WriteResponse;
+import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

@@ -30,12 +35,13 @@ import java.io.IOException;
/**
* A base class for the response of a write operation that involves a single doc
*/
-public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
+public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {

private ShardId shardId;
private String id;
private String type;
private long version;
+private boolean forcedRefresh;

public DocWriteResponse(ShardId shardId, String type, String id, long version) {
this.shardId = shardId;
@@ -84,6 +90,20 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
return this.version;
}

+/**
+* Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
+* {@link RefreshPolicy#IMMEDIATE} will always return true for this. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will
+* only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
+*/
+public boolean forcedRefresh() {
+return forcedRefresh;
+}
+
+@Override
+public void setForcedRefresh(boolean forcedRefresh) {
+this.forcedRefresh = forcedRefresh;
+}
+
/** returns the rest status for this response (based on {@link ShardInfo#status()} */
public RestStatus status() {
return getShardInfo().status();
@@ -97,6 +117,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
type = in.readString();
id = in.readString();
version = in.readZLong();
+forcedRefresh = in.readBoolean();
}

@Override
@@ -106,6 +127,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
out.writeString(type);
out.writeString(id);
out.writeZLong(version);
+out.writeBoolean(forcedRefresh);
}

static final class Fields {
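The readFrom/writeTo hunks above add forcedRefresh at the same relative position on both sides; Elasticsearch's stream format carries no field names, so read order must mirror write order exactly. A minimal sketch of that invariant using plain JDK streams rather than StreamInput/StreamOutput (WireOrder is hypothetical):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class WireOrder {
        long version;
        boolean forcedRefresh;

        void writeTo(DataOutputStream out) throws IOException {
            out.writeLong(version);
            out.writeBoolean(forcedRefresh); // new field appended after version
        }

        void readFrom(DataInputStream in) throws IOException {
            version = in.readLong();
            forcedRefresh = in.readBoolean(); // must match writeTo's order and types
        }
    }
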
@@ -121,7 +143,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
builder.field(Fields._INDEX, shardId.getIndexName())
.field(Fields._TYPE, type)
.field(Fields._ID, id)
-.field(Fields._VERSION, version);
+.field(Fields._VERSION, version)
+.field("forced_refresh", forcedRefresh);
shardInfo.toXContent(builder, params);
return builder;
}

@@ -40,7 +40,7 @@ public interface IndicesRequest {
*/
IndicesOptions indicesOptions();

-static interface Replaceable extends IndicesRequest {
+interface Replaceable extends IndicesRequest {
/**
* Sets the indices that the action relates to.
*/

@@ -38,7 +38,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -226,10 +225,8 @@ public class TransportClusterAllocationExplainAction
// get the existing unassigned info if available
UnassignedInfo ui = shard.unassignedInfo();

-RoutingNodesIterator iter = routingNodes.nodes();
Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
-while (iter.hasNext()) {
-RoutingNode node = iter.next();
+for (RoutingNode node : routingNodes) {
DiscoveryNode discoNode = node.node();
if (discoNode.isDataNode()) {
Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);

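The explicit RoutingNodesIterator can disappear because the enhanced for loop works on any Iterable, which RoutingNodes evidently is. A tiny sketch of the general mechanism (Bag is hypothetical):

    import java.util.Iterator;
    import java.util.List;

    final class Bag implements Iterable<String> {
        private final List<String> items = List.of("node-1", "node-2");

        @Override
        public Iterator<String> iterator() {
            return items.iterator();
        }

        public static void main(String[] args) {
            // Desugars to iterator()/hasNext()/next(), exactly the plumbing
            // the hunk above deletes by hand.
            for (String s : new Bag()) {
                System.out.println(s);
            }
        }
    }
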
@@ -33,8 +33,6 @@ import org.elasticsearch.common.unit.TimeValue;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

-import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
-
/**
*
*/
@@ -160,7 +158,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
indices[i] = in.readString();
}
}
-timeout = readTimeValue(in);
+timeout = new TimeValue(in);
if (in.readBoolean()) {
waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
}

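The swap from readTimeValue(in) to new TimeValue(in) here (and in ClusterHealthResponse below) reflects the codebase-wide move from static readX(StreamInput) helpers to deserializing constructors, which keep construction and deserialization in one place and let fields be final. A sketch of the shape of that pattern with plain JDK I/O (Timeout is hypothetical, not an Elasticsearch class):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    public final class Timeout {
        private final long millis;

        public Timeout(long millis) {
            this.millis = millis;
        }

        // Deserializing constructor: replaces a static readTimeout(in) helper
        // and allows the field to stay final.
        public Timeout(DataInput in) throws IOException {
            this.millis = in.readLong();
        }

        public void writeTo(DataOutput out) throws IOException {
            out.writeLong(millis);
        }
    }
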
@@ -182,12 +182,12 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
super.readFrom(in);
clusterName = in.readString();
clusterHealthStatus = ClusterHealthStatus.fromValue(in.readByte());
-clusterStateHealth = ClusterStateHealth.readClusterHealth(in);
+clusterStateHealth = new ClusterStateHealth(in);
numberOfPendingTasks = in.readInt();
timedOut = in.readBoolean();
numberOfInFlightFetch = in.readInt();
delayedUnassignedShards= in.readInt();
-taskMaxWaitingTime = TimeValue.readTimeValue(in);
+taskMaxWaitingTime = new TimeValue(in);
}

@Override
@@ -222,50 +222,48 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
return isTimedOut() ? RestStatus.REQUEST_TIMEOUT : RestStatus.OK;
}

-static final class Fields {
-static final String CLUSTER_NAME = "cluster_name";
-static final String STATUS = "status";
-static final String TIMED_OUT = "timed_out";
-static final String NUMBER_OF_NODES = "number_of_nodes";
-static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes";
-static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks";
-static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch";
-static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards";
-static final String TASK_MAX_WAIT_TIME_IN_QUEUE = "task_max_waiting_in_queue";
-static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis";
-static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number";
-static final String ACTIVE_SHARDS_PERCENT = "active_shards_percent";
-static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards";
-static final String ACTIVE_SHARDS = "active_shards";
-static final String RELOCATING_SHARDS = "relocating_shards";
-static final String INITIALIZING_SHARDS = "initializing_shards";
-static final String UNASSIGNED_SHARDS = "unassigned_shards";
-static final String INDICES = "indices";
-}
+private static final String CLUSTER_NAME = "cluster_name";
+private static final String STATUS = "status";
+private static final String TIMED_OUT = "timed_out";
+private static final String NUMBER_OF_NODES = "number_of_nodes";
+private static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes";
+private static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks";
+private static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch";
+private static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards";
+private static final String TASK_MAX_WAIT_TIME_IN_QUEUE = "task_max_waiting_in_queue";
+private static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis";
+private static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number";
+private static final String ACTIVE_SHARDS_PERCENT = "active_shards_percent";
+private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards";
+private static final String ACTIVE_SHARDS = "active_shards";
+private static final String RELOCATING_SHARDS = "relocating_shards";
+private static final String INITIALIZING_SHARDS = "initializing_shards";
+private static final String UNASSIGNED_SHARDS = "unassigned_shards";
+private static final String INDICES = "indices";

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.CLUSTER_NAME, getClusterName());
builder.field(Fields.STATUS, getStatus().name().toLowerCase(Locale.ROOT));
builder.field(Fields.TIMED_OUT, isTimedOut());
builder.field(Fields.NUMBER_OF_NODES, getNumberOfNodes());
builder.field(Fields.NUMBER_OF_DATA_NODES, getNumberOfDataNodes());
builder.field(Fields.ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards());
builder.field(Fields.ACTIVE_SHARDS, getActiveShards());
builder.field(Fields.RELOCATING_SHARDS, getRelocatingShards());
builder.field(Fields.INITIALIZING_SHARDS, getInitializingShards());
builder.field(Fields.UNASSIGNED_SHARDS, getUnassignedShards());
builder.field(Fields.DELAYED_UNASSIGNED_SHARDS, getDelayedUnassignedShards());
builder.field(Fields.NUMBER_OF_PENDING_TASKS, getNumberOfPendingTasks());
builder.field(Fields.NUMBER_OF_IN_FLIGHT_FETCH, getNumberOfInFlightFetch());
builder.timeValueField(Fields.TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS, Fields.TASK_MAX_WAIT_TIME_IN_QUEUE, getTaskMaxWaitingTime());
|
||||
builder.percentageField(Fields.ACTIVE_SHARDS_PERCENT_AS_NUMBER, Fields.ACTIVE_SHARDS_PERCENT, getActiveShardsPercent());
|
||||
builder.field(CLUSTER_NAME, getClusterName());
|
||||
builder.field(STATUS, getStatus().name().toLowerCase(Locale.ROOT));
|
||||
builder.field(TIMED_OUT, isTimedOut());
|
||||
builder.field(NUMBER_OF_NODES, getNumberOfNodes());
|
||||
builder.field(NUMBER_OF_DATA_NODES, getNumberOfDataNodes());
|
||||
builder.field(ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards());
|
||||
builder.field(ACTIVE_SHARDS, getActiveShards());
|
||||
builder.field(RELOCATING_SHARDS, getRelocatingShards());
|
||||
builder.field(INITIALIZING_SHARDS, getInitializingShards());
|
||||
builder.field(UNASSIGNED_SHARDS, getUnassignedShards());
|
||||
builder.field(DELAYED_UNASSIGNED_SHARDS, getDelayedUnassignedShards());
|
||||
builder.field(NUMBER_OF_PENDING_TASKS, getNumberOfPendingTasks());
|
||||
builder.field(NUMBER_OF_IN_FLIGHT_FETCH, getNumberOfInFlightFetch());
|
||||
builder.timeValueField(TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS, TASK_MAX_WAIT_TIME_IN_QUEUE, getTaskMaxWaitingTime());
|
||||
builder.percentageField(ACTIVE_SHARDS_PERCENT_AS_NUMBER, ACTIVE_SHARDS_PERCENT, getActiveShardsPercent());
|
||||
|
||||
String level = params.param("level", "cluster");
|
||||
boolean outputIndices = "indices".equals(level) || "shards".equals(level);
|
||||
|
||||
if (outputIndices) {
|
||||
builder.startObject(Fields.INDICES);
|
||||
builder.startObject(INDICES);
|
||||
for (ClusterIndexHealth indexHealth : clusterStateHealth.getIndices().values()) {
|
||||
builder.startObject(indexHealth.getIndex());
|
||||
indexHealth.toXContent(builder, params);
|
||||
|
|
|
@@ -101,7 +101,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequ
     threads = in.readInt();
     ignoreIdleThreads = in.readBoolean();
     type = in.readString();
-    interval = TimeValue.readTimeValue(in);
+    interval = new TimeValue(in);
     snapshots = in.readInt();
 }
 

@@ -38,7 +38,8 @@ public final class TransportLivenessAction implements TransportRequestHandler<Li
                                ClusterService clusterService, TransportService transportService) {
     this.clusterService = clusterService;
     this.clusterName = clusterName;
-    transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME, this);
+    transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME,
+        false, false /*can not trip circuit breaker*/, this);
 }
 
 @Override

@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
+import org.elasticsearch.tasks.TaskInfo;
 
 import java.util.List;
 

@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.tasks.TransportTasksAction;
 import org.elasticsearch.cluster.ClusterName;

@@ -36,6 +35,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.TaskInfo;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.EmptyTransportResponseHandler;
 import org.elasticsearch.transport.TransportChannel;

@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
+
+/**
+ * Action for retrieving a single task
+ */
+public class GetTaskAction extends Action<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> {
+
+    public static final GetTaskAction INSTANCE = new GetTaskAction();
+    public static final String NAME = "cluster:monitor/task/get";
+
+    private GetTaskAction() {
+        super(NAME);
+    }
+
+    @Override
+    public GetTaskResponse newResponse() {
+        return new GetTaskResponse();
+    }
+
+    @Override
+    public GetTaskRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new GetTaskRequestBuilder(client, this);
+    }
+}

@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.tasks.TaskId;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to get a single task
+ */
+public class GetTaskRequest extends ActionRequest<GetTaskRequest> {
+    private TaskId taskId = TaskId.EMPTY_TASK_ID;
+    private boolean waitForCompletion = false;
+    private TimeValue timeout = null;
+
+    /**
+     * Get the TaskId to look up.
+     */
+    public TaskId getTaskId() {
+        return taskId;
+    }
+
+    /**
+     * Set the TaskId to look up. Required.
+     */
+    public GetTaskRequest setTaskId(TaskId taskId) {
+        this.taskId = taskId;
+        return this;
+    }
+
+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public boolean getWaitForCompletion() {
+        return waitForCompletion;
+    }
+
+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public GetTaskRequest setWaitForCompletion(boolean waitForCompletion) {
+        this.waitForCompletion = waitForCompletion;
+        return this;
+    }
+
+    /**
+     * Timeout to wait for any async actions this request must take.
+     */
+    public TimeValue getTimeout() {
+        return timeout;
+    }
+
+    /**
+     * Timeout to wait for any async actions this request must take.
+     */
+    public GetTaskRequest setTimeout(TimeValue timeout) {
+        this.timeout = timeout;
+        return this;
+    }
+
+    GetTaskRequest nodeRequest(String thisNodeId, long thisTaskId) {
+        GetTaskRequest copy = new GetTaskRequest();
+        copy.setParentTask(thisNodeId, thisTaskId);
+        copy.setTaskId(taskId);
+        copy.setTimeout(timeout);
+        copy.setWaitForCompletion(waitForCompletion);
+        return copy;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (false == getTaskId().isSet()) {
+            validationException = addValidationError("task id is required", validationException);
+        }
+        return validationException;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        taskId = TaskId.readFromStream(in);
+        timeout = in.readOptionalWriteable(TimeValue::new);
+        waitForCompletion = in.readBoolean();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        taskId.writeTo(out);
+        out.writeOptionalWriteable(timeout);
+        out.writeBoolean(waitForCompletion);
+    }
+}

@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.tasks.TaskId;
+
+/**
+ * Builder for the request to retrieve a single task
+ */
+public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> {
+    public GetTaskRequestBuilder(ElasticsearchClient client, GetTaskAction action) {
+        super(client, action, new GetTaskRequest());
+    }
+
+    /**
+     * Set the TaskId to look up. Required.
+     */
+    public final GetTaskRequestBuilder setTaskId(TaskId taskId) {
+        request.setTaskId(taskId);
+        return this;
+    }
+
+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public final GetTaskRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
+        request.setWaitForCompletion(waitForCompletion);
+        return this;
+    }
+
+    /**
+     * Timeout to wait for any async actions this request must take.
+     */
+    public final GetTaskRequestBuilder setTimeout(TimeValue timeout) {
+        request.setTimeout(timeout);
+        return this;
+    }
+}

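Taken together, GetTaskAction, GetTaskRequest and GetTaskRequestBuilder give callers a fluent way to look up one task. A minimal caller-side sketch, assuming `client` is an available ElasticsearchClient and that TaskId exposes a (String nodeId, long id) constructor; the node id and task number below are placeholders:

    GetTaskResponse response = new GetTaskRequestBuilder(client, GetTaskAction.INSTANCE)
            .setTaskId(new TaskId("someNodeId", 42))      // required; validate() rejects unset ids
            .setWaitForCompletion(true)                   // block until the task finishes
            .setTimeout(TimeValue.timeValueSeconds(30))   // bound the wait
            .get();
    PersistedTaskInfo task = response.getTask();

Note how validation lives on the request: validate() returns a non-null ActionRequestValidationException when the TaskId is unset, so a missing setTaskId fails before any transport work happens.
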
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.tasks.PersistedTaskInfo;
+
+import java.io.IOException;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Response for a request to get a single task, wrapping the looked-up task
+ */
+public class GetTaskResponse extends ActionResponse implements ToXContent {
+    private PersistedTaskInfo task;
+
+    public GetTaskResponse() {
+    }
+
+    public GetTaskResponse(PersistedTaskInfo task) {
+        this.task = requireNonNull(task, "task is required");
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        task = in.readOptionalWriteable(PersistedTaskInfo::new);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalWriteable(task);
+    }
+
+    /**
+     * Get the actual result of the fetch.
+     */
+    public PersistedTaskInfo getTask() {
+        return task;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return task.innerToXContent(builder, params);
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+}

@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.PersistedTaskInfo;
+import org.elasticsearch.tasks.TaskPersistenceService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForCompletionTimeout;
+import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForTaskCompletion;
+
+/**
+ * Action to get a single task. If the task isn't running then it'll try to request the status from the results index.
+ *
+ * The general flow is:
+ * <ul>
+ * <li>If this isn't being executed on the node to which the requested TaskId belongs then move to that node.
+ * <li>Look up the task and return it if it exists
+ * <li>If it doesn't then look up the task from the results index
+ * </ul>
+ */
+public class TransportGetTaskAction extends HandledTransportAction<GetTaskRequest, GetTaskResponse> {
+    private final ClusterService clusterService;
+    private final TransportService transportService;
+    private final Client client;
+
+    @Inject
+    public TransportGetTaskAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
+            IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, Client client) {
+        super(settings, GetTaskAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, GetTaskRequest::new);
+        this.clusterService = clusterService;
+        this.transportService = transportService;
+        this.client = client;
+    }
+
+    @Override
+    protected void doExecute(GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+        throw new UnsupportedOperationException("Task is required");
+    }
+
+    @Override
+    protected void doExecute(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+        if (clusterService.localNode().getId().equals(request.getTaskId().getNodeId())) {
+            getRunningTaskFromNode(thisTask, request, listener);
+        } else {
+            runOnNodeWithTaskIfPossible(thisTask, request, listener);
+        }
+    }
+
+    /**
+     * Executed on the coordinating node to forward execution of the remaining work to the node that matches that requested
+     * {@link TaskId#getNodeId()}. If the node isn't in the cluster then this will just proceed to
+     * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} on this node.
+     */
+    private void runOnNodeWithTaskIfPossible(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+        TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
+        if (request.getTimeout() != null) {
+            builder.withTimeout(request.getTimeout());
+        }
+        builder.withCompress(false);
+        DiscoveryNode node = clusterService.state().nodes().get(request.getTaskId().getNodeId());
+        if (node == null) {
+            // Node is no longer part of the cluster! Try and look the task up from the results index.
+            getFinishedTaskFromIndex(thisTask, request, listener);
+            return;
+        }
+        GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
+        taskManager.registerChildTask(thisTask, node.getId());
+        transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(),
+                new BaseTransportResponseHandler<GetTaskResponse>() {
+                    @Override
+                    public GetTaskResponse newInstance() {
+                        return new GetTaskResponse();
+                    }
+
+                    @Override
+                    public void handleResponse(GetTaskResponse response) {
+                        listener.onResponse(response);
+                    }
+
+                    @Override
+                    public void handleException(TransportException exp) {
+                        listener.onFailure(exp);
+                    }
+
+                    @Override
+                    public String executor() {
+                        return ThreadPool.Names.SAME;
+                    }
+                });
+    }
+
+    /**
+     * Executed on the node that should be running the task to find and return the running task. Falls back to
+     * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} if the task isn't still running.
+     */
+    void getRunningTaskFromNode(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+        Task runningTask = taskManager.getTask(request.getTaskId().getId());
+        if (runningTask == null) {
+            getFinishedTaskFromIndex(thisTask, request, listener);
+        } else {
+            if (request.getWaitForCompletion()) {
+                // Shift to the generic thread pool and let it wait for the task to complete so we don't block any important threads.
+                threadPool.generic().execute(new AbstractRunnable() {
+                    @Override
+                    protected void doRun() throws Exception {
+                        waitForTaskCompletion(taskManager, runningTask, waitForCompletionTimeout(request.getTimeout()));
+                        // TODO look up the task's result from the .tasks index now that it is done
+                        listener.onResponse(
+                                new GetTaskResponse(new PersistedTaskInfo(runningTask.taskInfo(clusterService.localNode(), true))));
+                    }
+
+                    @Override
+                    public void onFailure(Throwable t) {
+                        listener.onFailure(t);
+                    }
+                });
+            } else {
+                listener.onResponse(new GetTaskResponse(new PersistedTaskInfo(runningTask.taskInfo(clusterService.localNode(), true))));
+            }
+        }
+    }
+
+    /**
+     * Send a {@link GetRequest} to the results index looking for the results of the task. It'll only be found if the task's result was
+     * persisted. Called on the node that once had the task if that node is part of the cluster or on the coordinating node if the node
+     * wasn't part of the cluster.
+     */
+    void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+        GetRequest get = new GetRequest(TaskPersistenceService.TASK_INDEX, TaskPersistenceService.TASK_TYPE,
+                request.getTaskId().toString());
+        get.setParentTask(clusterService.localNode().getId(), thisTask.getId());
+        client.get(get, new ActionListener<GetResponse>() {
+            @Override
+            public void onResponse(GetResponse getResponse) {
+                try {
+                    onGetFinishedTaskFromIndex(getResponse, listener);
+                } catch (Throwable e) {
+                    listener.onFailure(e);
+                }
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
+                    // We haven't yet created the index for the task results so it can't be found.
+                    listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", e, request.getTaskId()));
+                } else {
+                    listener.onFailure(e);
+                }
+            }
+        });
+    }
+
+    /**
+     * Called with the {@linkplain GetResponse} from loading the task from the results index. Called on the node that once had the task if
+     * that node is part of the cluster or on the coordinating node if the node wasn't part of the cluster.
+     */
+    void onGetFinishedTaskFromIndex(GetResponse response, ActionListener<GetTaskResponse> listener) throws IOException {
+        if (false == response.isExists()) {
+            listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", response.getId()));
+        }
+        if (response.isSourceEmpty()) {
+            listener.onFailure(new ElasticsearchException("Stored task status for [{}] didn't contain any source!", response.getId()));
+            return;
+        }
+        try (XContentParser parser = XContentHelper.createParser(response.getSourceAsBytesRef())) {
+            PersistedTaskInfo result = PersistedTaskInfo.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
+            listener.onResponse(new GetTaskResponse(result));
+        }
+    }
+}

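The core shape of TransportGetTaskAction is "try the live lookup, and on a miss fall back to the index". A compact, framework-free sketch of that callback fallback chain; the Listener interface below is a stand-in for Elasticsearch's ActionListener, not the real type:

    import java.util.Optional;
    import java.util.function.Consumer;

    public class FallbackLookupSketch {
        interface Listener<T> {
            void onResponse(T value);
            void onFailure(Exception e);
        }

        // Try the fast path; if it yields nothing, hand the same listener to the fallback.
        static <T> void lookupWithFallback(Optional<T> fastPath, Consumer<Listener<T>> fallback, Listener<T> listener) {
            if (fastPath.isPresent()) {
                listener.onResponse(fastPath.get());
            } else {
                fallback.accept(listener);
            }
        }

        public static void main(String[] args) {
            Listener<String> listener = new Listener<String>() {
                public void onResponse(String v) { System.out.println("found: " + v); }
                public void onFailure(Exception e) { System.out.println("failed: " + e.getMessage()); }
            };
            // Empty fast path, so the fallback fires, mirroring getFinishedTaskFromIndex above.
            lookupWithFallback(Optional.empty(),
                    l -> l.onFailure(new Exception("task isn't running or persisted")), listener);
        }
    }

Passing the original listener down the chain means every path, live hit, index hit, or miss, completes the same caller exactly once.
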
@@ -23,21 +23,21 @@ import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
 import org.elasticsearch.action.support.tasks.BaseTasksResponse;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskInfo;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
+import java.util.stream.Collectors;
 
 /**

@@ -47,10 +47,12 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
 
     private List<TaskInfo> tasks;
 
-    private Map<DiscoveryNode, List<TaskInfo>> nodes;
+    private Map<String, List<TaskInfo>> perNodeTasks;
 
     private List<TaskGroup> groups;
 
+    private DiscoveryNodes discoveryNodes;
+
     public ListTasksResponse() {
     }
 

@@ -75,28 +77,11 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
     /**
      * Returns the list of tasks by node
      */
-    public Map<DiscoveryNode, List<TaskInfo>> getPerNodeTasks() {
-        if (nodes != null) {
-            return nodes;
-        }
-        Map<DiscoveryNode, List<TaskInfo>> nodeTasks = new HashMap<>();
-
-        Set<DiscoveryNode> nodes = new HashSet<>();
-        for (TaskInfo shard : tasks) {
-            nodes.add(shard.getNode());
-        }
-
-        for (DiscoveryNode node : nodes) {
-            List<TaskInfo> tasks = new ArrayList<>();
-            for (TaskInfo taskInfo : this.tasks) {
-                if (taskInfo.getNode().equals(node)) {
-                    tasks.add(taskInfo);
-                }
-            }
-            nodeTasks.put(node, tasks);
-        }
-        this.nodes = nodeTasks;
-        return nodeTasks;
+    public Map<String, List<TaskInfo>> getPerNodeTasks() {
+        if (perNodeTasks == null) {
+            perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.getTaskId().getNodeId()));
+        }
+        return perNodeTasks;
     }
 
     public List<TaskGroup> getTaskGroups() {

@@ -138,6 +123,14 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
         return tasks;
     }
 
+    /**
+     * Set a reference to the {@linkplain DiscoveryNodes}. Used for calling {@link #toXContent(XContentBuilder, ToXContent.Params)} with
+     * {@code group_by=nodes}.
+     */
+    public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) {
+        this.discoveryNodes = discoveryNodes;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (getTaskFailures() != null && getTaskFailures().size() > 0) {

@@ -161,33 +154,38 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
         }
         String groupBy = params.param("group_by", "nodes");
         if ("nodes".equals(groupBy)) {
+            if (discoveryNodes == null) {
+                throw new IllegalStateException("discoveryNodes must be set before calling toXContent with group_by=nodes");
+            }
             builder.startObject("nodes");
-            for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
-                DiscoveryNode node = entry.getKey();
-                builder.startObject(node.getId());
-                builder.field("name", node.getName());
-                builder.field("transport_address", node.getAddress().toString());
-                builder.field("host", node.getHostName());
-                builder.field("ip", node.getAddress());
+            for (Map.Entry<String, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
+                DiscoveryNode node = discoveryNodes.get(entry.getKey());
+                builder.startObject(entry.getKey());
+                if (node != null) {
+                    // If the node is no longer part of the cluster, oh well, we'll just skip its useful information.
+                    builder.field("name", node.getName());
+                    builder.field("transport_address", node.getAddress().toString());
+                    builder.field("host", node.getHostName());
+                    builder.field("ip", node.getAddress());
 
-                builder.startArray("roles");
-                for (DiscoveryNode.Role role : node.getRoles()) {
-                    builder.value(role.getRoleName());
-                }
-                builder.endArray();
+                    builder.startArray("roles");
+                    for (DiscoveryNode.Role role : node.getRoles()) {
+                        builder.value(role.getRoleName());
+                    }
+                    builder.endArray();
 
-                if (!node.getAttributes().isEmpty()) {
-                    builder.startObject("attributes");
-                    for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
-                        builder.field(attrEntry.getKey(), attrEntry.getValue());
+                    if (!node.getAttributes().isEmpty()) {
+                        builder.startObject("attributes");
+                        for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
+                            builder.field(attrEntry.getKey(), attrEntry.getValue());
+                        }
+                        builder.endObject();
                     }
-                    builder.endObject();
+                }
                 builder.startObject("tasks");
                 for(TaskInfo task : entry.getValue()) {
-                    builder.startObject(task.getTaskId().toString());
+                    builder.field(task.getTaskId().toString());
                     task.toXContent(builder, params);
-                    builder.endObject();
                 }
                 builder.endObject();
                 builder.endObject();

@@ -196,9 +194,8 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
         } else if ("parents".equals(groupBy)) {
             builder.startObject("tasks");
             for (TaskGroup group : getTaskGroups()) {
-                builder.startObject(group.getTaskInfo().getTaskId().toString());
+                builder.field(group.getTaskInfo().getTaskId().toString());
                 group.toXContent(builder, params);
-                builder.endObject();
             }
             builder.endObject();
         }

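The new getPerNodeTasks() leans on Collectors.groupingBy to bucket tasks by the node id embedded in each task id, replacing the hand-rolled double loop. A self-contained sketch of the same idiom; the Task class here is a stand-in, not the Elasticsearch TaskInfo:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class GroupByNodeSketch {
        static class Task {
            final String nodeId;
            final long id;
            Task(String nodeId, long id) { this.nodeId = nodeId; this.id = id; }
        }

        public static void main(String[] args) {
            List<Task> tasks = Arrays.asList(new Task("nodeA", 1), new Task("nodeB", 2), new Task("nodeA", 3));
            // Single pass over the list, same shape as perNodeTasks above, keyed by node id.
            Map<String, List<Task>> perNode = tasks.stream()
                    .collect(Collectors.groupingBy(t -> t.nodeId));
            System.out.println(perNode.get("nodeA").size()); // 2
        }
    }

Caching the grouped map in a field, as the diff does, also keeps repeated toXContent calls cheap.
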
@@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list;
 
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.tasks.TaskInfo;
 
 import java.io.IOException;
 import java.util.ArrayList;

@@ -79,16 +80,15 @@ public class TaskGroup implements ToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        task.toXContent(builder, params);
+        builder.startObject();
+        task.innerToXContent(builder, params);
         if (childTasks.isEmpty() == false) {
             builder.startArray("children");
             for (TaskGroup taskGroup : childTasks) {
-                builder.startObject();
                 taskGroup.toXContent(builder, params);
-                builder.endObject();
             }
             builder.endArray();
         }
-        return builder;
+        return builder.endObject();
     }
 }

@@ -33,6 +33,8 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 

@@ -47,6 +49,26 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
  *
  */
 public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {
+    public static void waitForTaskCompletion(TaskManager taskManager, Task task, long untilInNanos) {
+        while (System.nanoTime() - untilInNanos < 0) {
+            if (taskManager.getTask(task.getId()) == null) {
+                return;
+            }
+            try {
+                Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
+            } catch (InterruptedException e) {
+                throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, task);
+            }
+        }
+        throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", task);
+    }
+    public static long waitForCompletionTimeout(TimeValue timeout) {
+        if (timeout == null) {
+            timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
+        }
+        return System.nanoTime() + timeout.nanos();
+    }
+
     private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
     private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);
 

@@ -75,35 +97,18 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
 
     @Override
     protected void processTasks(ListTasksRequest request, Consumer<Task> operation) {
-        if (false == request.getWaitForCompletion()) {
-            super.processTasks(request, operation);
-            return;
-        }
-        // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
-        TimeValue timeout = request.getTimeout();
-        if (timeout == null) {
-            timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
-        }
-        long timeoutTime = System.nanoTime() + timeout.nanos();
-        super.processTasks(request, operation.andThen((Task t) -> {
-            while (System.nanoTime() - timeoutTime < 0) {
-                Task task = taskManager.getTask(t.getId());
-                if (task == null) {
-                    return;
-                }
+        if (request.getWaitForCompletion()) {
+            long timeoutNanos = waitForCompletionTimeout(request.getTimeout());
+            operation = operation.andThen(task -> {
                 if (task.getAction().startsWith(ListTasksAction.NAME)) {
                     // It doesn't make sense to wait for List Tasks and it can cause an infinite loop of the task waiting
-                    // for itself of one of its child tasks
+                    // for itself or one of its child tasks
                     return;
                 }
-                try {
-                    Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
-                } catch (InterruptedException e) {
-                    throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
-                }
-            }
-            throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
-        }));
+                waitForTaskCompletion(taskManager, task, timeoutNanos);
+            });
+        }
+        super.processTasks(request, operation);
     }
 
     @Override

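waitForTaskCompletion polls against a deadline expressed in nanoTime arithmetic. Note the comparison style: System.nanoTime() - untilInNanos < 0 rather than System.nanoTime() < untilInNanos, which stays correct even if the counter wraps. A stripped-down, self-contained version of the loop; the done supplier stands in for taskManager.getTask(...) == null:

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    public class DeadlinePollSketch {
        static void awaitOrTimeout(BooleanSupplier done, long timeoutMillis) throws InterruptedException {
            long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
            // Compare elapsed differences, never absolute nanoTime values.
            while (System.nanoTime() - deadline < 0) {
                if (done.getAsBoolean()) {
                    return;
                }
                Thread.sleep(100); // matches the 100ms WAIT_FOR_COMPLETION_POLL above
            }
            throw new RuntimeException("timed out");
        }

        public static void main(String[] args) throws InterruptedException {
            long start = System.nanoTime();
            // Completes after roughly 250ms, well inside the 1s budget.
            awaitOrTimeout(() -> System.nanoTime() - start > TimeUnit.MILLISECONDS.toNanos(250), 1000);
        }
    }
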
@@ -26,10 +26,10 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.snapshots.SnapshotInfo;
 import org.elasticsearch.snapshots.SnapshotsService;
 import org.elasticsearch.threadpool.ThreadPool;

@@ -72,7 +72,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
 @Override
 protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) {
     SnapshotsService.SnapshotRequest snapshotRequest =
-            new SnapshotsService.SnapshotRequest("create_snapshot [" + request.snapshot() + "]", request.snapshot(), request.repository())
+            new SnapshotsService.SnapshotRequest(request.repository(), request.snapshot(), "create_snapshot [" + request.snapshot() + "]")
                     .indices(request.indices())
                     .indicesOptions(request.indicesOptions())
                     .partial(request.partial())

@@ -84,19 +84,19 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
 public void onResponse() {
     if (request.waitForCompletion()) {
         snapshotsService.addListener(new SnapshotsService.SnapshotCompletionListener() {
-            SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
-
             @Override
-            public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
-                if (this.snapshotId.equals(snapshotId)) {
-                    listener.onResponse(new CreateSnapshotResponse(snapshot));
+            public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) {
+                if (snapshot.getRepository().equals(request.repository()) &&
+                        snapshot.getSnapshotId().getName().equals(request.snapshot())) {
+                    listener.onResponse(new CreateSnapshotResponse(snapshotInfo));
                     snapshotsService.removeListener(this);
                 }
             }
 
             @Override
-            public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
-                if (this.snapshotId.equals(snapshotId)) {
+            public void onSnapshotFailure(Snapshot snapshot, Throwable t) {
+                if (snapshot.getRepository().equals(request.repository()) &&
+                        snapshot.getSnapshotId().getName().equals(request.snapshot())) {
                     listener.onFailure(t);
                     snapshotsService.removeListener(this);
                 }

@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;

@@ -66,8 +65,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction<Del
 
 @Override
 protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener<DeleteSnapshotResponse> listener) {
-    SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot());
-    snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() {
+    snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), new SnapshotsService.DeleteSnapshotListener() {
         @Override
         public void onResponse() {
             listener.onResponse(new DeleteSnapshotResponse(true));

@@ -42,7 +42,7 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
 }
 
 GetSnapshotsResponse(List<SnapshotInfo> snapshots) {
-    this.snapshots = snapshots;
+    this.snapshots = Collections.unmodifiableList(snapshots);
 }
 
 /**

@@ -26,20 +26,22 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotMissingException;
 import org.elasticsearch.snapshots.SnapshotsService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.util.ArrayList;
-import java.util.Collections;
+import java.util.HashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 /**

@@ -52,7 +54,8 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
 public TransportGetSnapshotsAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                    ThreadPool threadPool, SnapshotsService snapshotsService, ActionFilters actionFilters,
                                    IndexNameExpressionResolver indexNameExpressionResolver) {
-    super(settings, GetSnapshotsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetSnapshotsRequest::new);
+    super(settings, GetSnapshotsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,
+          GetSnapshotsRequest::new);
     this.snapshotsService = snapshotsService;
 }
 

@@ -72,36 +75,51 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
 }
 
 @Override
-protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) {
+protected void masterOperation(final GetSnapshotsRequest request, ClusterState state,
+                               final ActionListener<GetSnapshotsResponse> listener) {
     try {
+        final String repository = request.repository();
         List<SnapshotInfo> snapshotInfoBuilder = new ArrayList<>();
         if (isAllSnapshots(request.snapshots())) {
-            snapshotInfoBuilder.addAll(snapshotsService.snapshots(request.repository(), request.ignoreUnavailable()));
+            snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository));
+            snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository,
+                                                                  snapshotsService.snapshotIds(repository),
+                                                                  request.ignoreUnavailable()));
        } else if (isCurrentSnapshots(request.snapshots())) {
-            snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(request.repository()));
+            snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository));
        } else {
-            Set<String> snapshotsToGet = new LinkedHashSet<>(); // to keep insertion order
-            List<SnapshotInfo> snapshots = null;
+            final Map<String, SnapshotId> allSnapshotIds = new HashMap<>();
+            for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) {
+                SnapshotId snapshotId = snapshotInfo.snapshotId();
+                allSnapshotIds.put(snapshotId.getName(), snapshotId);
+            }
+            for (SnapshotId snapshotId : snapshotsService.snapshotIds(repository)) {
+                allSnapshotIds.put(snapshotId.getName(), snapshotId);
+            }
+            final Set<SnapshotId> toResolve = new LinkedHashSet<>(); // maintain order
            for (String snapshotOrPattern : request.snapshots()) {
                if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) {
-                    snapshotsToGet.add(snapshotOrPattern);
-                } else {
-                    if (snapshots == null) { // lazily load snapshots
-                        snapshots = snapshotsService.snapshots(request.repository(), request.ignoreUnavailable());
+                    if (allSnapshotIds.containsKey(snapshotOrPattern)) {
+                        toResolve.add(allSnapshotIds.get(snapshotOrPattern));
+                    } else if (request.ignoreUnavailable() == false) {
+                        throw new SnapshotMissingException(repository, snapshotOrPattern);
                    }
-                    for (SnapshotInfo snapshot : snapshots) {
-                        if (Regex.simpleMatch(snapshotOrPattern, snapshot.name())) {
-                            snapshotsToGet.add(snapshot.name());
+                } else {
+                    for (Map.Entry<String, SnapshotId> entry : allSnapshotIds.entrySet()) {
+                        if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) {
+                            toResolve.add(entry.getValue());
                        }
                    }
                }
            }
-            for (String snapshot : snapshotsToGet) {
-                SnapshotId snapshotId = new SnapshotId(request.repository(), snapshot);
-                snapshotInfoBuilder.add(snapshotsService.snapshot(snapshotId));
+
+            if (toResolve.isEmpty() && request.ignoreUnavailable() == false) {
+                throw new SnapshotMissingException(repository, request.snapshots()[0]);
            }
+
+            snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable()));
        }
-        listener.onResponse(new GetSnapshotsResponse(Collections.unmodifiableList(snapshotInfoBuilder)));
+        listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder));
    } catch (Throwable t) {
        listener.onFailure(t);
    }

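The rewritten masterOperation resolves concrete names and wildcard patterns against a name-to-id map, keeping first-seen order via a LinkedHashSet. A self-contained sketch of that resolution step; simpleMatch here is a small glob helper standing in for Elasticsearch's Regex.simpleMatch:

    import java.util.*;
    import java.util.regex.Pattern;

    public class ResolveSnapshotsSketch {
        // Glob matcher: '*' matches any run of characters; everything else is literal.
        static boolean simpleMatch(String pattern, String value) {
            String[] parts = pattern.split("\\*", -1);
            StringBuilder regex = new StringBuilder();
            for (int i = 0; i < parts.length; i++) {
                if (i > 0) regex.append(".*");
                regex.append(Pattern.quote(parts[i]));
            }
            return value.matches(regex.toString());
        }

        public static void main(String[] args) {
            Map<String, String> allByName = new LinkedHashMap<>();
            allByName.put("snap-1", "uuid-a");
            allByName.put("snap-2", "uuid-b");
            allByName.put("backup-1", "uuid-c");

            Set<String> toResolve = new LinkedHashSet<>(); // preserves request order, drops duplicates
            for (String nameOrPattern : new String[] {"backup-1", "snap-*"}) {
                if (nameOrPattern.indexOf('*') < 0) {
                    if (allByName.containsKey(nameOrPattern)) {
                        toResolve.add(allByName.get(nameOrPattern));
                    }
                } else {
                    for (Map.Entry<String, String> e : allByName.entrySet()) {
                        if (simpleMatch(nameOrPattern, e.getKey())) {
                            toResolve.add(e.getValue());
                        }
                    }
                }
            }
            System.out.println(toResolve); // [uuid-c, uuid-a, uuid-b]
        }
    }
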
@@ -57,7 +57,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
     private String renamePattern;
     private String renameReplacement;
     private boolean waitForCompletion;
-    private boolean includeGlobalState = true;
+    private boolean includeGlobalState = false;
     private boolean partial = false;
     private boolean includeAliases = true;
     private Settings settings = EMPTY_SETTINGS;

@@ -26,12 +26,12 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.snapshots.RestoreInfo;
 import org.elasticsearch.snapshots.RestoreService;
+import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 

@@ -72,23 +72,22 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
 }
 
 @Override
-protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) {
-    RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(
-        "restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(),
+protected void masterOperation(final RestoreSnapshotRequest request, final ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) {
+    RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(request.repository(), request.snapshot(),
         request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(),
         request.settings(), request.masterNodeTimeout(), request.includeGlobalState(), request.partial(), request.includeAliases(),
-        request.indexSettings(), request.ignoreIndexSettings());
+        request.indexSettings(), request.ignoreIndexSettings(), "restore_snapshot[" + request.snapshot() + "]");
 
     restoreService.restoreSnapshot(restoreRequest, new ActionListener<RestoreInfo>() {
         @Override
         public void onResponse(RestoreInfo restoreInfo) {
             if (restoreInfo == null && request.waitForCompletion()) {
                 restoreService.addListener(new ActionListener<RestoreService.RestoreCompletionResponse>() {
-                    SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
-
                     @Override
                     public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) {
-                        if (this.snapshotId.equals(restoreCompletionResponse.getSnapshotId())) {
+                        final Snapshot snapshot = restoreCompletionResponse.getSnapshot();
+                        if (snapshot.getRepository().equals(request.repository()) &&
+                                snapshot.getSnapshotId().getName().equals(request.snapshot())) {
                             listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo()));
                             restoreService.removeListener(this);
                         }

@@ -20,7 +20,7 @@
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
 import org.elasticsearch.cluster.SnapshotsInProgress.State;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -35,6 +35,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 
 import static java.util.Collections.unmodifiableMap;
@@ -44,7 +45,7 @@ import static java.util.Collections.unmodifiableMap;
  */
 public class SnapshotStatus implements ToXContent, Streamable {
 
-    private SnapshotId snapshotId;
+    private Snapshot snapshot;
 
     private State state;
 
@@ -56,11 +57,10 @@ public class SnapshotStatus implements ToXContent, Streamable {
 
     private SnapshotStats stats;
 
-
-    SnapshotStatus(SnapshotId snapshotId, State state, List<SnapshotIndexShardStatus> shards) {
-        this.snapshotId = snapshotId;
-        this.state = state;
-        this.shards = shards;
+    SnapshotStatus(final Snapshot snapshot, final State state, final List<SnapshotIndexShardStatus> shards) {
+        this.snapshot = Objects.requireNonNull(snapshot);
+        this.state = Objects.requireNonNull(state);
+        this.shards = Objects.requireNonNull(shards);
         shardsStats = new SnapshotShardsStats(shards);
         updateShardStats();
     }
@@ -69,10 +69,10 @@ public class SnapshotStatus implements ToXContent, Streamable {
     }
 
     /**
-     * Returns snapshot id
+     * Returns snapshot
     */
-    public SnapshotId getSnapshotId() {
-        return snapshotId;
+    public Snapshot getSnapshot() {
+        return snapshot;
     }
 
     /**
@@ -124,7 +124,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
-        snapshotId = SnapshotId.readSnapshotId(in);
+        snapshot = new Snapshot(in);
         state = State.fromValue(in.readByte());
         int size = in.readVInt();
         List<SnapshotIndexShardStatus> builder = new ArrayList<>();
@@ -137,7 +137,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        snapshotId.writeTo(out);
+        snapshot.writeTo(out);
         out.writeByte(state.value());
         out.writeVInt(shards.size());
         for (SnapshotIndexShardStatus shard : shards) {
@@ -170,7 +170,6 @@ public class SnapshotStatus implements ToXContent, Streamable {
         }
     }
 
-
     /**
      * Returns number of files in the snapshot
     */
@@ -178,22 +177,22 @@ public class SnapshotStatus implements ToXContent, Streamable {
         return stats;
     }
 
-    static final class Fields {
-        static final String SNAPSHOT = "snapshot";
-        static final String REPOSITORY = "repository";
-        static final String STATE = "state";
-        static final String INDICES = "indices";
-    }
+    private static final String SNAPSHOT = "snapshot";
+    private static final String REPOSITORY = "repository";
+    private static final String UUID = "uuid";
+    private static final String STATE = "state";
+    private static final String INDICES = "indices";
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        builder.field(Fields.SNAPSHOT, snapshotId.getSnapshot());
-        builder.field(Fields.REPOSITORY, snapshotId.getRepository());
-        builder.field(Fields.STATE, state.name());
+        builder.field(SNAPSHOT, snapshot.getSnapshotId().getName());
+        builder.field(REPOSITORY, snapshot.getRepository());
+        builder.field(UUID, snapshot.getSnapshotId().getUUID());
+        builder.field(STATE, state.name());
         shardsStats.toXContent(builder, params);
         stats.toXContent(builder, params);
-        builder.startObject(Fields.INDICES);
+        builder.startObject(INDICES);
         for (SnapshotIndexStatus indexStatus : getIndices().values()) {
             indexStatus.toXContent(builder, params);
         }

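The hunks above replace the snapshot-id handle with a full Snapshot and add a "uuid" field to the status output. A minimal consumer sketch, assuming a SnapshotStatus instance named status obtained from a snapshots status response (the variable name is hypothetical; the accessors are the ones introduced above):

    Snapshot snapshot = status.getSnapshot();          // replaces the removed getSnapshotId()
    String name = snapshot.getSnapshotId().getName();  // rendered as the "snapshot" field
    String uuid = snapshot.getSnapshotId().getUUID();  // rendered as the new "uuid" field
    String repository = snapshot.getRepository();      // rendered as the "repository" field
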
@@ -29,7 +29,7 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
@@ -94,11 +94,11 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
 
     @Override
     protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
-        Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
+        Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
         try {
             String nodeId = clusterService.localNode().getId();
-            for (SnapshotId snapshotId : request.snapshotIds) {
-                Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshotId);
+            for (Snapshot snapshot : request.snapshots) {
+                Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot);
                 if (shardsStatus == null) {
                     continue;
                 }
@@ -114,7 +114,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
                     }
                     shardMapBuilder.put(shardEntry.getKey(), shardStatus);
                 }
-                snapshotMapBuilder.put(snapshotId, unmodifiableMap(shardMapBuilder));
+                snapshotMapBuilder.put(snapshot, unmodifiableMap(shardMapBuilder));
             }
             return new NodeSnapshotStatus(clusterService.localNode(), unmodifiableMap(snapshotMapBuilder));
         } catch (Exception e) {
@@ -129,7 +129,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
 
     public static class Request extends BaseNodesRequest<Request> {
 
-        private SnapshotId[] snapshotIds;
+        private Snapshot[] snapshots;
 
         public Request() {
         }
@@ -138,8 +138,8 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
             super(nodesIds);
         }
 
-        public Request snapshotIds(SnapshotId[] snapshotIds) {
-            this.snapshotIds = snapshotIds;
+        public Request snapshots(Snapshot[] snapshots) {
+            this.snapshots = snapshots;
             return this;
         }
 
@@ -179,42 +179,42 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
 
     public static class NodeRequest extends BaseNodeRequest {
 
-        private List<SnapshotId> snapshotIds;
+        private List<Snapshot> snapshots;
 
         public NodeRequest() {
         }
 
        NodeRequest(String nodeId, TransportNodesSnapshotsStatus.Request request) {
             super(nodeId);
-            snapshotIds = Arrays.asList(request.snapshotIds);
+            snapshots = Arrays.asList(request.snapshots);
         }
 
         @Override
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
-            snapshotIds = in.readList(SnapshotId::readSnapshotId);
+            snapshots = in.readList(Snapshot::new);
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            out.writeStreamableList(snapshotIds);
+            out.writeList(snapshots);
         }
     }
 
     public static class NodeSnapshotStatus extends BaseNodeResponse {
 
-        private Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status;
+        private Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> status;
 
         NodeSnapshotStatus() {
         }
 
-        public NodeSnapshotStatus(DiscoveryNode node, Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status) {
+        public NodeSnapshotStatus(DiscoveryNode node, Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> status) {
             super(node);
             this.status = status;
         }
 
-        public Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status() {
+        public Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> status() {
             return status;
         }
 
@@ -222,9 +222,9 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             int numberOfSnapshots = in.readVInt();
-            Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>(numberOfSnapshots);
+            Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>(numberOfSnapshots);
             for (int i = 0; i < numberOfSnapshots; i++) {
-                SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
+                Snapshot snapshot = new Snapshot(in);
                 int numberOfShards = in.readVInt();
                 Map<ShardId, SnapshotIndexShardStatus> shardMapBuilder = new HashMap<>(numberOfShards);
                 for (int j = 0; j < numberOfShards; j++) {
@@ -232,7 +232,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
                     SnapshotIndexShardStatus status = SnapshotIndexShardStatus.readShardSnapshotStatus(in);
                     shardMapBuilder.put(shardId, status);
                 }
-                snapshotMapBuilder.put(snapshotId, unmodifiableMap(shardMapBuilder));
+                snapshotMapBuilder.put(snapshot, unmodifiableMap(shardMapBuilder));
             }
             status = unmodifiableMap(snapshotMapBuilder);
         }
@@ -242,7 +242,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
             super.writeTo(out);
             if (status != null) {
                 out.writeVInt(status.size());
-                for (Map.Entry<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> entry : status.entrySet()) {
+                for (Map.Entry<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> entry : status.entrySet()) {
                     entry.getKey().writeTo(out);
                     out.writeVInt(entry.getValue().size());
                     for (Map.Entry<ShardId, SnapshotIndexShardStatus> shardEntry : entry.getValue().entrySet()) {

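For the node-level transport above, the wire format changes from streamable SnapshotId lists to full Snapshot objects. A sketch of the round trip, assuming out and in are a connected StreamOutput/StreamInput pair (the variable names are hypothetical; the calls are the ones used above):

    out.writeList(snapshots);                          // Snapshot is Writeable, so writeList works
    List<Snapshot> copy = in.readList(Snapshot::new);  // rebuilt via the Snapshot(StreamInput) constructor
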
@@ -29,26 +29,32 @@ import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotMissingException;
 import org.elasticsearch.snapshots.SnapshotsService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
 
 /**
  */
@@ -87,8 +93,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
     protected void masterOperation(final SnapshotsStatusRequest request,
                                    final ClusterState state,
                                    final ActionListener<SnapshotsStatusResponse> listener) throws Exception {
-        List<SnapshotsInProgress.Entry> currentSnapshots = snapshotsService.currentSnapshots(request.repository(), request.snapshots());
-
+        List<SnapshotsInProgress.Entry> currentSnapshots =
+            snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
         if (currentSnapshots.isEmpty()) {
             listener.onResponse(buildResponse(request, currentSnapshots, null));
             return;
@@ -105,19 +111,19 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
 
         if (!nodesIds.isEmpty()) {
             // There are still some snapshots running - check their progress
-            SnapshotId[] snapshotIds = new SnapshotId[currentSnapshots.size()];
+            Snapshot[] snapshots = new Snapshot[currentSnapshots.size()];
             for (int i = 0; i < currentSnapshots.size(); i++) {
-                snapshotIds[i] = currentSnapshots.get(i).snapshotId();
+                snapshots[i] = currentSnapshots.get(i).snapshot();
             }
 
             TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()]))
-                .snapshotIds(snapshotIds).timeout(request.masterNodeTimeout());
+                .snapshots(snapshots).timeout(request.masterNodeTimeout());
             transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener<TransportNodesSnapshotsStatus.NodesSnapshotStatus>() {
                 @Override
                 public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) {
                     try {
                         List<SnapshotsInProgress.Entry> currentSnapshots =
-                            snapshotsService.currentSnapshots(request.repository(), request.snapshots());
+                            snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
                         listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses));
                     } catch (Throwable e) {
                         listener.onFailure(e);
@@ -136,12 +142,12 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
 
     }
 
-    private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List<SnapshotsInProgress.Entry> currentSnapshots,
+    private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List<SnapshotsInProgress.Entry> currentSnapshotEntries,
                                                   TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException {
        // First process snapshots that are currently being processed
         List<SnapshotStatus> builder = new ArrayList<>();
-        Set<SnapshotId> currentSnapshotIds = new HashSet<>();
-        if (!currentSnapshots.isEmpty()) {
+        Set<String> currentSnapshotNames = new HashSet<>();
+        if (!currentSnapshotEntries.isEmpty()) {
             Map<String, TransportNodesSnapshotsStatus.NodeSnapshotStatus> nodeSnapshotStatusMap;
             if (nodeSnapshotStatuses != null) {
                 nodeSnapshotStatusMap = nodeSnapshotStatuses.getNodesMap();
@@ -149,8 +155,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                 nodeSnapshotStatusMap = new HashMap<>();
             }
 
-            for (SnapshotsInProgress.Entry entry : currentSnapshots) {
-                currentSnapshotIds.add(entry.snapshotId());
+            for (SnapshotsInProgress.Entry entry : currentSnapshotEntries) {
+                currentSnapshotNames.add(entry.snapshot().getSnapshotId().getName());
                 List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
                 for (ObjectObjectCursor<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardEntry : entry.shards()) {
                     SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.value;
@@ -158,7 +164,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                         // We should have information about this shard from the shard:
                         TransportNodesSnapshotsStatus.NodeSnapshotStatus nodeStatus = nodeSnapshotStatusMap.get(status.nodeId());
                         if (nodeStatus != null) {
-                            Map<ShardId, SnapshotIndexShardStatus> shardStatuses = nodeStatus.status().get(entry.snapshotId());
+                            Map<ShardId, SnapshotIndexShardStatus> shardStatuses = nodeStatus.status().get(entry.snapshot());
                             if (shardStatuses != null) {
                                 SnapshotIndexShardStatus shardStatus = shardStatuses.get(shardEntry.key);
                                 if (shardStatus != null) {
@@ -190,41 +196,50 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                     SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(shardEntry.key, stage);
                     shardStatusBuilder.add(shardStatus);
                 }
-                builder.add(new SnapshotStatus(entry.snapshotId(), entry.state(), Collections.unmodifiableList(shardStatusBuilder)));
+                builder.add(new SnapshotStatus(entry.snapshot(), entry.state(), Collections.unmodifiableList(shardStatusBuilder)));
             }
         }
         // Now add snapshots on disk that are not currently running
-        if (Strings.hasText(request.repository())) {
-            if (request.snapshots() != null && request.snapshots().length > 0) {
-                for (String snapshotName : request.snapshots()) {
-                    SnapshotId snapshotId = new SnapshotId(request.repository(), snapshotName);
-                    if (currentSnapshotIds.contains(snapshotId)) {
-                        // This is a snapshot that is currently running - skipping
-                        continue;
-                    }
-                    SnapshotInfo snapshot = snapshotsService.snapshot(snapshotId);
-                    List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
-                    if (snapshot.state().completed()) {
-                        Map<ShardId, IndexShardSnapshotStatus> shardStatuses = snapshotsService.snapshotShards(snapshotId);
-                        for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatuses.entrySet()) {
-                            shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue()));
-                        }
-                        final SnapshotsInProgress.State state;
-                        switch (snapshot.state()) {
-                            case FAILED:
-                                state = SnapshotsInProgress.State.FAILED;
-                                break;
-                            case SUCCESS:
-                            case PARTIAL:
-                                // Translating both PARTIAL and SUCCESS to SUCCESS for now
-                                // TODO: add the differentiation on the metadata level in the next major release
-                                state = SnapshotsInProgress.State.SUCCESS;
-                                break;
-                            default:
-                                throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state());
-                        }
-                        builder.add(new SnapshotStatus(snapshotId, state, Collections.unmodifiableList(shardStatusBuilder)));
-                    }
-                }
+        final String repositoryName = request.repository();
+        if (Strings.hasText(repositoryName) && request.snapshots() != null && request.snapshots().length > 0) {
+            final Set<String> requestedSnapshotNames = Sets.newHashSet(request.snapshots());
+            final Map<String, SnapshotId> matchedSnapshotIds = snapshotsService.snapshotIds(repositoryName).stream()
+                .filter(s -> requestedSnapshotNames.contains(s.getName()))
+                .collect(Collectors.toMap(SnapshotId::getName, Function.identity()));
+            for (final String snapshotName : request.snapshots()) {
+                SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName);
+                if (snapshotId == null) {
+                    if (currentSnapshotNames.contains(snapshotName)) {
+                        // we've already found this snapshot in the current snapshot entries, so skip over
+                        continue;
+                    } else {
+                        // neither in the current snapshot entries nor found in the repository
+                        throw new SnapshotMissingException(repositoryName, snapshotName);
+                    }
+                }
+                SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
+                List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
+                if (snapshotInfo.state().completed()) {
+                    Map<ShardId, IndexShardSnapshotStatus> shardStatuses =
+                        snapshotsService.snapshotShards(request.repository(), snapshotInfo);
+                    for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatuses.entrySet()) {
+                        shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue()));
+                    }
+                    final SnapshotsInProgress.State state;
+                    switch (snapshotInfo.state()) {
+                        case FAILED:
+                            state = SnapshotsInProgress.State.FAILED;
+                            break;
+                        case SUCCESS:
+                        case PARTIAL:
+                            // Translating both PARTIAL and SUCCESS to SUCCESS for now
+                            // TODO: add the differentiation on the metadata level in the next major release
+                            state = SnapshotsInProgress.State.SUCCESS;
+                            break;
+                        default:
+                            throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state());
+                    }
+                    builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotInfo.snapshotId()), state,
+                        Collections.unmodifiableList(shardStatusBuilder)));
+                }
             }
         }

@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.transport.TransportMessage;
 
 import java.util.HashMap;
@@ -40,6 +41,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private final String cause;
     private final String index;
     private final boolean updateAllTypes;
+    private Index shrinkFrom;
 
     private IndexMetaData.State state = IndexMetaData.State.OPEN;
 
@@ -54,7 +56,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private final Set<ClusterBlock> blocks = new HashSet<>();
 
 
-    CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
+    public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
         this.originalMessage = originalMessage;
         this.cause = cause;
         this.index = index;
@@ -91,6 +93,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
         return this;
     }
 
+    public CreateIndexClusterStateUpdateRequest shrinkFrom(Index shrinkFrom) {
+        this.shrinkFrom = shrinkFrom;
+        return this;
+    }
+
     public TransportMessage originalMessage() {
         return originalMessage;
     }
@@ -127,6 +134,10 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
         return blocks;
     }
 
+    public Index shrinkFrom() {
+        return shrinkFrom;
+    }
+
     /** True if all fields that span multiple types should be updated, false otherwise */
     public boolean updateAllTypes() {
         return updateAllTypes;

@@ -30,10 +30,10 @@ import java.io.IOException;
  */
 public class CreateIndexResponse extends AcknowledgedResponse {
 
-    CreateIndexResponse() {
+    protected CreateIndexResponse() {
     }
 
-    CreateIndexResponse(boolean acknowledged) {
+    protected CreateIndexResponse(boolean acknowledged) {
         super(acknowledged);
     }
 

@@ -16,22 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
 */
-package org.elasticsearch.painless;
+package org.elasticsearch.action.admin.indices.delete;
 
-import org.elasticsearch.painless.Variables.Reserved;
-import org.elasticsearch.painless.node.SSource;
+import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
 
 /**
- * Runs the analysis phase of compilation using the Painless AST.
+ * Cluster state update request that allows to delete one or more indices
 */
-final class Analyzer {
-    static Variables analyze(Reserved shortcut, SSource root) {
-        Variables variables = new Variables(shortcut);
-        root.analyze(variables);
-
-        return variables;
-    }
-
-    private Analyzer() {}
+public class DeleteIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<DeleteIndexClusterStateUpdateRequest> {
+
+    DeleteIndexClusterStateUpdateRequest() {
+    }
 }

@@ -23,26 +23,22 @@ import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.action.support.master.MasterNodeRequest;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.CollectionUtils;
 
 import java.io.IOException;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
-import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
 
 /**
  * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}.
 */
-public class DeleteIndexRequest extends MasterNodeRequest<DeleteIndexRequest> implements IndicesRequest.Replaceable {
+public class DeleteIndexRequest extends AcknowledgedRequest<DeleteIndexRequest> implements IndicesRequest.Replaceable {
 
     private String[] indices;
     // Delete index should work by default on both open and closed indices.
     private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
-    private TimeValue timeout = AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
 
     public DeleteIndexRequest() {
     }
@@ -98,37 +94,11 @@ public class DeleteIndexRequest extends MasterNodeRequest<DeleteIndexRequest> im
         return indices;
     }
 
-    /**
-     * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
-     * to <tt>10s</tt>.
-     */
-    public TimeValue timeout() {
-        return timeout;
-    }
-
-    /**
-     * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
-     * to <tt>10s</tt>.
-     */
-    public DeleteIndexRequest timeout(TimeValue timeout) {
-        this.timeout = timeout;
-        return this;
-    }
-
-    /**
-     * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults
-     * to <tt>10s</tt>.
-     */
-    public DeleteIndexRequest timeout(String timeout) {
-        return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"));
-    }
-
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         indices = in.readStringArray();
         indicesOptions = IndicesOptions.readIndicesOptions(in);
-        timeout = readTimeValue(in);
     }
 
     @Override
@@ -136,6 +106,5 @@ public class DeleteIndexRequest extends MasterNodeRequest<DeleteIndexRequest> im
         super.writeTo(out);
         out.writeStringArray(indices);
         indicesOptions.writeIndicesOptions(out);
-        timeout.writeTo(out);
     }
 }

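The removed timeout plumbing is not lost: AcknowledgedRequest already carries the ack timeout, so callers keep the same surface. A minimal sketch, assuming the usual single-index DeleteIndexRequest constructor ("my-index" is a placeholder name):

    DeleteIndexRequest request = new DeleteIndexRequest("my-index");
    request.timeout(TimeValue.timeValueSeconds(30));        // inherited from AcknowledgedRequest
    request.masterNodeTimeout(TimeValue.timeValueSeconds(30));
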
@@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -85,15 +86,21 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
             listener.onResponse(new DeleteIndexResponse(true));
             return;
         }
-        deleteIndexService.deleteIndices(new MetaDataDeleteIndexService.Request(concreteIndices).timeout(request.timeout()).masterTimeout(request.masterNodeTimeout()), new MetaDataDeleteIndexService.Listener() {
+
+        DeleteIndexClusterStateUpdateRequest deleteRequest = new DeleteIndexClusterStateUpdateRequest()
+            .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
+            .indices(concreteIndices.toArray(new Index[concreteIndices.size()]));
+
+        deleteIndexService.deleteIndices(deleteRequest, new ActionListener<ClusterStateUpdateResponse>() {
 
             @Override
-            public void onResponse(MetaDataDeleteIndexService.Response response) {
-                listener.onResponse(new DeleteIndexResponse(response.acknowledged()));
+            public void onResponse(ClusterStateUpdateResponse response) {
+                listener.onResponse(new DeleteIndexResponse(response.isAcknowledged()));
             }
 
             @Override
             public void onFailure(Throwable t) {
                 logger.debug("failed to delete indices [{}]", t, concreteIndices);
                 listener.onFailure(t);
             }
         });

@@ -19,9 +19,9 @@
 
 package org.elasticsearch.action.admin.indices.flush;
 
-import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;

@@ -19,14 +19,13 @@
 
 package org.elasticsearch.action.admin.indices.flush;
 
-import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.IndexShard;
@@ -55,18 +54,19 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     }
 
     @Override
-    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(ShardFlushRequest shardRequest) {
+    protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest) {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.flush(shardRequest.getRequest());
         logger.trace("{} flush request executed on primary", indexShard.shardId());
-        return new Tuple<>(new ReplicationResponse(), shardRequest);
+        return new PrimaryResult(shardRequest, new ReplicationResponse());
     }
 
     @Override
-    protected void shardOperationOnReplica(ShardFlushRequest request) {
+    protected ReplicaResult shardOperationOnReplica(ShardFlushRequest request) {
         IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
         indexShard.flush(request.getRequest());
         logger.trace("{} flush request executed on replica", indexShard.shardId());
+        return new ReplicaResult();
     }
 
     @Override

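The refresh action below gets the same reshaping, so the new TransportReplicationAction contract is worth stating once. A sketch of the shape, using the types from the hunk above (PrimaryResult and ReplicaResult are inner classes of TransportReplicationAction):

    // primary side: do the work, then return both the request to forward to
    // replicas and the response for the coordinating node in one result object
    return new PrimaryResult(shardRequest, new ReplicationResponse());

    // replica side: the explicit ReplicaResult replaces the old void return,
    // giving the framework a hook for acknowledging replica operations
    return new ReplicaResult();
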
@@ -19,10 +19,10 @@
 
 package org.elasticsearch.action.admin.indices.refresh;
 
-import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.BasicReplicationRequest;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;

@@ -19,15 +19,14 @@
 
 package org.elasticsearch.action.admin.indices.refresh;
 
-import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.BasicReplicationRequest;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.IndexShard;
@@ -36,10 +35,8 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
-/**
- *
- */
-public class TransportShardRefreshAction extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {
+public class TransportShardRefreshAction
+    extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {
 
     public static final String NAME = RefreshAction.NAME + "[s]";
 
@@ -47,8 +44,8 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
     public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                        IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
                                        ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
-            actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
+        super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
+            indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
     }
 
     @Override
@@ -57,19 +54,20 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
     }
 
     @Override
-    protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(BasicReplicationRequest shardRequest) {
+    protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardRequest) {
         IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());
-        return new Tuple<>(new ReplicationResponse(), shardRequest);
+        return new PrimaryResult(shardRequest, new ReplicationResponse());
     }
 
     @Override
-    protected void shardOperationOnReplica(BasicReplicationRequest request) {
+    protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request) {
         final ShardId shardId = request.shardId();
         IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on replica", indexShard.shardId());
+        return new ReplicaResult();
     }
 
     @Override

@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.shrink;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
+
+/**
+ */
+public class ShrinkAction extends Action<ShrinkRequest, ShrinkResponse, ShrinkRequestBuilder> {
+
+    public static final ShrinkAction INSTANCE = new ShrinkAction();
+    public static final String NAME = "indices:admin/shrink";
+
+    private ShrinkAction() {
+        super(NAME);
+    }
+
+    @Override
+    public ShrinkResponse newResponse() {
+        return new ShrinkResponse();
+    }
+
+    @Override
+    public ShrinkRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new ShrinkRequestBuilder(client, this);
+    }
+}

@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.shrink;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Request class to shrink an index into a single shard
+ */
+public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements IndicesRequest {
+
+    private CreateIndexRequest shrinkIndexRequest;
+    private String sourceIndex;
+
+    ShrinkRequest() {}
+
+    public ShrinkRequest(String targetIndex, String sourceIndex) {
+        this.shrinkIndexRequest = new CreateIndexRequest(targetIndex);
+        this.sourceIndex = sourceIndex;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = shrinkIndexRequest == null ? null : shrinkIndexRequest.validate();
+        if (sourceIndex == null) {
+            validationException = addValidationError("source index is missing", validationException);
+        }
+        if (shrinkIndexRequest == null) {
+            validationException = addValidationError("shrink index request is missing", validationException);
+        }
+        return validationException;
+    }
+
+    public void setSourceIndex(String index) {
+        this.sourceIndex = index;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        shrinkIndexRequest = new CreateIndexRequest();
+        shrinkIndexRequest.readFrom(in);
+        sourceIndex = in.readString();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        shrinkIndexRequest.writeTo(out);
+        out.writeString(sourceIndex);
+    }
+
+    @Override
+    public String[] indices() {
+        return new String[] {sourceIndex};
+    }
+
+    @Override
+    public IndicesOptions indicesOptions() {
+        return IndicesOptions.lenientExpandOpen();
+    }
+
+    public void setShrinkIndex(CreateIndexRequest shrinkIndexRequest) {
+        this.shrinkIndexRequest = Objects.requireNonNull(shrinkIndexRequest, "shrink index request must not be null");
+    }
+
+    /**
+     * Returns the {@link CreateIndexRequest} for the shrink index
+     */
+    public CreateIndexRequest getShrinkIndexRequest() {
+        return shrinkIndexRequest;
+    }
+
+    /**
+     * Returns the source index name
+     */
+    public String getSourceIndex() {
+        return sourceIndex;
+    }
+}

@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.shrink;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.settings.Settings;
+
+public class ShrinkRequestBuilder extends AcknowledgedRequestBuilder<ShrinkRequest, ShrinkResponse,
+        ShrinkRequestBuilder> {
+    public ShrinkRequestBuilder(ElasticsearchClient client, ShrinkAction action) {
+        super(client, action, new ShrinkRequest());
+    }
+
+    public ShrinkRequestBuilder setTargetIndex(CreateIndexRequest request) {
+        this.request.setShrinkIndex(request);
+        return this;
+    }
+
+    public ShrinkRequestBuilder setSourceIndex(String index) {
+        this.request.setSourceIndex(index);
+        return this;
+    }
+
+    public ShrinkRequestBuilder setSettings(Settings settings) {
+        this.request.getShrinkIndexRequest().settings(settings);
+        return this;
+    }
+}

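A hypothetical usage sketch assembled only from the classes added above (the index names and the codec setting are illustrative, not part of this change; listener is an ActionListener<ShrinkResponse> assumed to exist in the caller):

    ShrinkRequestBuilder shrink = new ShrinkRequestBuilder(client, ShrinkAction.INSTANCE)
        .setSourceIndex("logs")                                  // index to shrink down
        .setTargetIndex(new CreateIndexRequest("logs-shrunk"))   // single-shard target
        .setSettings(Settings.builder()
            .put("index.codec", "best_compression")              // any target-index settings
            .build());
    shrink.execute(listener);

Note that setTargetIndex must run before setSettings, since setSettings forwards to the target CreateIndexRequest.
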
@@ -0,0 +1,31 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.shrink;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+
+public final class ShrinkResponse extends CreateIndexResponse {
+    ShrinkResponse() {
+    }
+
+    ShrinkResponse(boolean acknowledged) {
+        super(acknowledged);
+    }
+}

@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.shrink;
+
+import org.apache.lucene.index.IndexWriter;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Set;
+import java.util.function.IntFunction;
+
+/**
+ * Main class to initiate shrinking an index into a new index with a single shard
+ */
+public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkRequest, ShrinkResponse> {
+
+    private final MetaDataCreateIndexService createIndexService;
+    private final Client client;
+
+    @Inject
+    public TransportShrinkAction(Settings settings, TransportService transportService, ClusterService clusterService,
+                                 ThreadPool threadPool, MetaDataCreateIndexService createIndexService,
+                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) {
+        super(settings, ShrinkAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,
+            ShrinkRequest::new);
+        this.createIndexService = createIndexService;
+        this.client = client;
+    }
+
+    @Override
+    protected String executor() {
+        // we go async right away
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected ShrinkResponse newResponse() {
+        return new ShrinkResponse();
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(ShrinkRequest request, ClusterState state) {
+        return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getShrinkIndexRequest().index());
+    }
+
+    @Override
+    protected void masterOperation(final ShrinkRequest shrinkRequest, final ClusterState state,
+                                   final ActionListener<ShrinkResponse> listener) {
+        final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex());
+        client.admin().indices().prepareStats(sourceIndex).clear().setDocs(true).execute(new ActionListener<IndicesStatsResponse>() {
+            @Override
+            public void onResponse(IndicesStatsResponse indicesStatsResponse) {
+                CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(shrinkRequest, state,
+                    (i) -> {
+                        IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
+                        return shard == null ? null : shard.getPrimary().getDocs();
+                    }, indexNameExpressionResolver);
+                createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
+                    @Override
+                    public void onResponse(ClusterStateUpdateResponse response) {
+                        listener.onResponse(new ShrinkResponse(response.isAcknowledged()));
+                    }
+
+                    @Override
+                    public void onFailure(Throwable t) {
+                        if (t instanceof IndexAlreadyExistsException) {
+                            logger.trace("[{}] failed to create shrink index", t, updateRequest.index());
+                        } else {
+                            logger.debug("[{}] failed to create shrink index", t, updateRequest.index());
+                        }
+                        listener.onFailure(t);
+                    }
+                });
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                listener.onFailure(e);
+            }
+        });
+    }
+
+    // static for unit testing this method
+    static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ShrinkRequest shrinkRequest, final ClusterState state,
+                                                                          final IntFunction<DocsStats> perShardDocStats,
+                                                                          IndexNameExpressionResolver indexNameExpressionResolver) {
+        final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex());
+        final CreateIndexRequest targetIndex = shrinkRequest.getShrinkIndexRequest();
+        final String targetIndexName = indexNameExpressionResolver.resolveDateMathExpression(targetIndex.index());
+        final IndexMetaData metaData = state.metaData().index(sourceIndex);
+        final Settings targetIndexSettings = Settings.builder().put(targetIndex.settings())
+            .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
+        int numShards = 1;
+        if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
+            numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
+        }
+        for (int i = 0; i < numShards; i++) {
+            Set<ShardId> shardIds = IndexMetaData.selectShrinkShards(i, metaData, numShards);
+            long count = 0;
+            for (ShardId id : shardIds) {
+                DocsStats docsStats = perShardDocStats.apply(id.id());
+                if (docsStats != null) {
+                    count += docsStats.getCount();
+                }
+                if (count > IndexWriter.MAX_DOCS) {
+                    throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS
+                        + "] docs - too many documents in shards " + shardIds);
+                }
+            }
+        }
+        targetIndex.cause("shrink_index");
+        Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);
+        settingsBuilder.put("index.number_of_shards", numShards);
+        targetIndex.settings(settingsBuilder);
+
+        return new CreateIndexClusterStateUpdateRequest(targetIndex, "shrink_index", targetIndexName, true)
+            // Mappings are updated on the node when the shards are merged in. This prevents race conditions, since all
+            // mappings must have been applied by the time we take the snapshot of the source index; if the index were
+            // switched back to read/write and documents were added afterwards, their mappings would be missed and the
+            // resulting index would be corrupted and hard to debug.
+            .ackTimeout(targetIndex.timeout())
+            .masterNodeTimeout(targetIndex.masterNodeTimeout())
+            .settings(targetIndex.settings())
+            .aliases(targetIndex.aliases())
+            .customs(targetIndex.customs())
+            .shrinkFrom(metaData.getIndex());
+    }
+
+}

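The per-target-shard guard in prepareCreateIndexRequest deserves spelling out: every source shard that collapses into one target shard contributes its primary doc count, and the running sum may not exceed Lucene's per-index limit, IndexWriter.MAX_DOCS (Integer.MAX_VALUE - 128 = 2,147,483,519). A worked sketch with made-up counts:

    // e.g. shrinking 4 source shards into numShards = 1 target shard:
    // target shard 0 <- source shards {0, 1, 2, 3}
    long count = 800_000_000L + 700_000_000L + 600_000_000L + 500_000_000L; // 2.6B docs
    if (count > IndexWriter.MAX_DOCS) {  // 2_600_000_000 > 2_147_483_519
        // rejected up front, instead of failing deep inside the segment merge
        throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS + "] docs");
    }
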
@@ -28,7 +28,6 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
-import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -43,7 +42,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.QueryShardException;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
@@ -73,8 +71,6 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
 
     private final ScriptService scriptService;
 
-    private final PageCacheRecycler pageCacheRecycler;
-
     private final BigArrays bigArrays;
 
     private final FetchPhase fetchPhase;
@@ -82,13 +78,12 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
     @Inject
     public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                         TransportService transportService, IndicesService indicesService, ScriptService scriptService,
-                                        PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters,
+                                        BigArrays bigArrays, ActionFilters actionFilters,
                                         IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
         super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
             indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
         this.indicesService = indicesService;
         this.scriptService = scriptService;
-        this.pageCacheRecycler = pageCacheRecycler;
         this.bigArrays = bigArrays;
         this.fetchPhase = fetchPhase;
     }
@@ -176,7 +171,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
 
         DefaultSearchContext searchContext = new DefaultSearchContext(0,
             new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
-            indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(),
+            indexService, indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(),
             parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
         SearchContext.setCurrent(searchContext);
         try {

@@ -26,6 +26,7 @@ import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;

@@ -54,16 +55,21 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * Note that we only support refresh on the bulk request not per item.
 * @see org.elasticsearch.client.Client#bulk(BulkRequest)
 */
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest {
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {

    private static final int REQUEST_OVERHEAD = 50;

    /**
     * Requests that are part of this request. It is only possible to add things that are both {@link ActionRequest}s and
     * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare
     * the one with the least casts.
     */
    final List<ActionRequest<?>> requests = new ArrayList<>();
    List<Object> payloads = null;

    protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
    private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
    private boolean refresh = false;
    private RefreshPolicy refreshPolicy = RefreshPolicy.NONE;

    private long sizeInBytes = 0;

@@ -437,18 +443,15 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
        return this.consistencyLevel;
    }

    /**
     * Should a refresh be executed post this bulk operation causing the operations to
     * be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
     * to <tt>false</tt>.
     */
    public BulkRequest refresh(boolean refresh) {
        this.refresh = refresh;
    @Override
    public BulkRequest setRefreshPolicy(RefreshPolicy refreshPolicy) {
        this.refreshPolicy = refreshPolicy;
        return this;
    }

    public boolean refresh() {
        return this.refresh;
    @Override
    public RefreshPolicy getRefreshPolicy() {
        return refreshPolicy;
    }

    /**

@@ -483,7 +486,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     * @return Whether this bulk request contains index request with an ingest pipeline enabled.
     */
    public boolean hasIndexRequestsWithPipelines() {
        for (ActionRequest actionRequest : requests) {
        for (ActionRequest<?> actionRequest : requests) {
            if (actionRequest instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) actionRequest;
                if (Strings.hasText(indexRequest.getPipeline())) {

@@ -503,10 +506,9 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
        }
        for (ActionRequest<?> request : requests) {
            // We first check if refresh has been set
            if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
                    (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
                    (request instanceof IndexRequest && ((IndexRequest)request).refresh())) {
                validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException);
            if (((WriteRequest<?>) request).getRefreshPolicy() != RefreshPolicy.NONE) {
                validationException = addValidationError(
                        "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException);
            }
            ActionRequestValidationException ex = request.validate();
            if (ex != null) {

@@ -541,8 +543,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                requests.add(request);
            }
        }
        refresh = in.readBoolean();
        timeout = TimeValue.readTimeValue(in);
        refreshPolicy = RefreshPolicy.readFrom(in);
        timeout = new TimeValue(in);
    }

    @Override

@@ -560,7 +562,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
            }
            request.writeTo(out);
        }
        out.writeBoolean(refresh);
        refreshPolicy.writeTo(out);
        timeout.writeTo(out);
    }
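The hunks above replace BulkRequest's boolean refresh flag with the three-valued RefreshPolicy from the new WriteRequest interface, and per-item refresh flags are now rejected at validation time. A minimal client-side sketch of the replacement, assuming a transport Client named `client` (the index, type, id, and field names are placeholders):

```java
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;

// Before this change: bulkRequest.refresh(true)
// After: set one policy on the whole bulk request, never on the individual items.
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("index", "type", "1").source("field", "value"));
bulkRequest.setRefreshPolicy(RefreshPolicy.IMMEDIATE); // refresh the shard right after the write
// RefreshPolicy.WAIT_UNTIL instead blocks the response until a refresh makes the writes visible;
// RefreshPolicy.NONE (the default) returns as soon as the writes are durable.
client.bulk(bulkRequest).actionGet();
```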
@@ -25,6 +25,7 @@ import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

@@ -35,7 +36,8 @@ import org.elasticsearch.common.unit.TimeValue;
 * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes
 * it in a single batch.
 */
public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, BulkRequestBuilder> {
public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, BulkRequestBuilder>
        implements WriteRequestBuilder<BulkRequestBuilder> {

    public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) {
        super(client, action, new BulkRequest());

@@ -116,16 +118,6 @@ public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkRe
        return this;
    }

    /**
     * Should a refresh be executed post this bulk operation causing the operations to
     * be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
     * to <tt>false</tt>.
     */
    public BulkRequestBuilder setRefresh(boolean refresh) {
        request.refresh(refresh);
        return this;
    }

    /**
     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
     */
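Because the per-builder setRefresh(boolean) methods are removed in favor of the shared WriteRequestBuilder mixin, the fluent API becomes uniform across the index, delete, update, and bulk builders. A hedged sketch, again assuming a Client named `client`:

```java
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;

// The same setter is now inherited by every write builder via WriteRequestBuilder.
client.prepareBulk()
        .add(client.prepareIndex("index", "type", "1").setSource("field", "value"))
        .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) // block until a refresh makes the writes searchable
        .get();
```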
@@ -19,7 +19,7 @@

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

@@ -31,23 +31,17 @@ import java.util.List;
/**
 *
 */
public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {

    private BulkItemRequest[] items;

    private boolean refresh;

    public BulkShardRequest() {
    }

    BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) {
    BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
        super(shardId);
        this.items = items;
        this.refresh = refresh;
    }

    boolean refresh() {
        return this.refresh;
        setRefreshPolicy(refreshPolicy);
    }

    BulkItemRequest[] items() {

@@ -77,7 +71,6 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
                out.writeBoolean(false);
            }
        }
        out.writeBoolean(refresh);
    }

    @Override

@@ -89,7 +82,6 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
            items[i] = BulkItemRequest.readBulkItem(in);
        }
    }
    refresh = in.readBoolean();
    }

    @Override

@@ -97,8 +89,15 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
        // This is included in error messages so we'll try to make it somewhat user friendly.
        StringBuilder b = new StringBuilder("BulkShardRequest to [");
        b.append(index).append("] containing [").append(items.length).append("] requests");
        if (refresh) {
        switch (getRefreshPolicy()) {
        case IMMEDIATE:
            b.append(" and a refresh");
            break;
        case WAIT_UNTIL:
            b.append(" blocking until refresh");
            break;
        case NONE:
            break;
        }
        return b.toString();
    }
@@ -19,7 +19,9 @@

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

@@ -29,7 +31,7 @@ import java.io.IOException;
/**
 *
 */
public class BulkShardResponse extends ReplicationResponse {
public class BulkShardResponse extends ReplicationResponse implements WriteResponse {

    private ShardId shardId;
    private BulkItemResponse[] responses;

@@ -50,6 +52,20 @@ public class BulkShardResponse extends ReplicationResponse {
        return responses;
    }

    @Override
    public void setForcedRefresh(boolean forcedRefresh) {
        /*
         * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the
         * response.
         */
        for (BulkItemResponse response : responses) {
            DocWriteResponse r = response.getResponse();
            if (r != null) {
                r.setForcedRefresh(forcedRefresh);
            }
        }
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
@@ -304,7 +304,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            if (request instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) request;
                String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName();
                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.type(), indexRequest.id(), indexRequest.routing()).shardId();
                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.id(), indexRequest.routing()).shardId();
                List<BulkItemRequest> list = requestsByShard.get(shardId);
                if (list == null) {
                    list = new ArrayList<>();

@@ -314,7 +314,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            } else if (request instanceof DeleteRequest) {
                DeleteRequest deleteRequest = (DeleteRequest) request;
                String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName();
                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.id(), deleteRequest.routing()).shardId();
                List<BulkItemRequest> list = requestsByShard.get(shardId);
                if (list == null) {
                    list = new ArrayList<>();

@@ -324,7 +324,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            } else if (request instanceof UpdateRequest) {
                UpdateRequest updateRequest = (UpdateRequest) request;
                String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName();
                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.id(), updateRequest.routing()).shardId();
                List<BulkItemRequest> list = requestsByShard.get(shardId);
                if (list == null) {
                    list = new ArrayList<>();

@@ -344,7 +344,8 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
            final ShardId shardId = entry.getKey();
            final List<BulkItemRequest> requests = entry.getValue();
            BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()]));
            BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.getRefreshPolicy(),
                    requests.toArray(new BulkItemRequest[requests.size()]));
            bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
            bulkShardRequest.timeout(bulkRequest.timeout());
            if (task != null) {
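Note also the signature change threaded through these hunks: operationRouting().indexShards(...) and getShards(...) lose their type parameter, so the target shard is derived from the document id (or a custom routing value) alone; the mapping type no longer appears in the routing call. Schematically (an illustration only, not the actual Murmur3-based implementation):

```java
// Illustrative only: Elasticsearch hashes the routing value with Murmur3, not hashCode().
static int shardFor(String id, String routing, int numberOfShards) {
    String effectiveRouting = routing != null ? routing : id; // custom routing wins over the id
    return Math.floorMod(effectiveRouting.hashCode(), numberOfShards);
}
```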
@@ -30,7 +30,8 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;

@@ -53,6 +54,7 @@ import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;

@@ -67,7 +69,7 @@ import static org.elasticsearch.action.support.replication.ReplicationOperation.
/**
 * Performs the index operation.
 */
public class TransportShardBulkAction extends TransportReplicationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardResponse> {

    private final static String OP_TYPE_UPDATE = "update";
    private final static String OP_TYPE_DELETE = "delete";

@@ -83,9 +85,8 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
            IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
            MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
                actionFilters, indexNameExpressionResolver,
                BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK);
        super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
                indexNameExpressionResolver, BulkShardRequest::new, ThreadPool.Names.BULK);
        this.updateHelper = updateHelper;
        this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
        this.mappingUpdatedAction = mappingUpdatedAction;

@@ -107,10 +108,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
    }

    @Override
    protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(BulkShardRequest request) {
    protected WriteResult<BulkShardResponse> onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception {
        ShardId shardId = request.shardId();
        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        final IndexShard indexShard = indexService.getShard(shardId.getId());
        final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData();

        long[] preVersions = new long[request.items().length];

@@ -121,13 +121,13 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
            location = handleItem(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
        }

        processAfterWrite(request.refresh(), indexShard, location);
        BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
        BulkItemRequest[] items = request.items();
        for (int i = 0; i < items.length; i++) {
            responses[i] = items[i].getPrimaryResponse();
        }
        return new Tuple<>(new BulkShardResponse(request.shardId(), responses), request);
        BulkShardResponse response = new BulkShardResponse(request.shardId(), responses);
        return new WriteResult<>(response, location);
    }

    private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {

@@ -154,9 +154,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
            preVersionTypes[requestIndex] = indexRequest.versionType();
            try {
                WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
                location = locationToSync(location, result.location);
                location = locationToSync(location, result.getLocation());
                // add the response
                IndexResponse indexResponse = result.response();
                IndexResponse indexResponse = result.getResponse();
                setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
            } catch (Throwable e) {
                // rethrow the failure if we are going to retry on primary and let parent failure to handle it

@@ -197,8 +197,8 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
            try {
                // add the response
                final WriteResult<DeleteResponse> writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
                DeleteResponse deleteResponse = writeResult.response();
                location = locationToSync(location, writeResult.location);
                DeleteResponse deleteResponse = writeResult.getResponse();
                location = locationToSync(location, writeResult.getLocation());
                setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
            } catch (Throwable e) {
                // rethrow the failure if we are going to retry on primary and let parent failure to handle it

@@ -237,16 +237,17 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
        }
        if (updateResult.success()) {
            if (updateResult.writeResult != null) {
                location = locationToSync(location, updateResult.writeResult.location);
                location = locationToSync(location, updateResult.writeResult.getLocation());
            }
            switch (updateResult.result.operation()) {
                case UPSERT:
                case INDEX:
                    @SuppressWarnings("unchecked")
                    WriteResult<IndexResponse> result = updateResult.writeResult;
                    IndexRequest indexRequest = updateResult.request();
                    BytesReference indexSourceAsBytes = indexRequest.source();
                    // add the response
                    IndexResponse indexResponse = result.response();
                    IndexResponse indexResponse = result.getResponse();
                    UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated());
                    if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
                        Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);

@@ -256,8 +257,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                    setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
                    break;
                case DELETE:
                    @SuppressWarnings("unchecked")
                    WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
                    DeleteResponse response = writeResult.response();
                    DeleteResponse response = writeResult.getResponse();
                    DeleteRequest deleteRequest = updateResult.request();
                    updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);
                    updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));

@@ -326,11 +328,14 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
        request.setPrimaryResponse(response);
        if (response.isFailed()) {
            request.setIgnoreOnReplica();
        } else {
            // Set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though.
            response.getResponse().setShardInfo(new ShardInfo());
        }
    }

    private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData,
            IndexShard indexShard, boolean processed) throws Throwable {
    private WriteResult<IndexResponse> shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData,
            IndexShard indexShard, boolean processed) throws Throwable {

        MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
        if (!processed) {

@@ -431,12 +436,8 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
        }
    }

    @Override
    protected void shardOperationOnReplica(BulkShardRequest request) {
        final ShardId shardId = request.shardId();
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard indexShard = indexService.getShard(shardId.id());
    protected Location onReplicaShard(BulkShardRequest request, IndexShard indexShard) {
        Translog.Location location = null;
        for (int i = 0; i < request.items().length; i++) {
            BulkItemRequest item = request.items()[i];

@@ -472,8 +473,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                    throw new IllegalStateException("Unexpected index operation: " + item.request());
            }
        }

        processAfterWrite(request.refresh(), indexShard, location);
        return location;
    }

    private void applyVersion(BulkItemRequest item, long version, VersionType versionType) {
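Stepping back, the pattern repeated across TransportShardBulkAction (and the delete and index actions below) is that shard resolution, the post-write refresh, and translog syncing move out of each action and into the new TransportWriteAction base class, so subclasses implement only two hooks. A schematic of that contract as it reads off this diff; the class, request, and helper names here are placeholders and the bodies are illustrative, not real base-class code:

```java
// Placeholders: MyRequest / MyResponse / doWrite / buildResponse stand in for a concrete action's types.
public class MyTransportWriteAction extends TransportWriteAction<MyRequest, MyResponse> {

    @Override
    protected WriteResult<MyResponse> onPrimaryShard(MyRequest request, IndexShard indexShard) throws Exception {
        // Perform the write, then return the response together with the translog location;
        // the base class applies the request's RefreshPolicy and syncs the translog.
        Translog.Location location = doWrite(request, indexShard);
        return new WriteResult<>(buildResponse(request), location);
    }

    @Override
    protected Translog.Location onReplicaShard(MyRequest request, IndexShard indexShard) {
        // Replicas only report where the write landed so the base class can do the same housekeeping.
        return doWrite(request, indexShard);
    }
}
```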
@@ -21,7 +21,7 @@ package org.elasticsearch.action.delete;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * @see org.elasticsearch.client.Client#delete(DeleteRequest)
 * @see org.elasticsearch.client.Requests#deleteRequest(String)
 */
public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements DocumentRequest<DeleteRequest> {
public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> implements DocumentRequest<DeleteRequest> {

    private String type;
    private String id;

@@ -51,7 +51,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
    private String routing;
    @Nullable
    private String parent;
    private boolean refresh;
    private long version = Versions.MATCH_ANY;
    private VersionType versionType = VersionType.INTERNAL;

@@ -165,20 +164,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        return this.routing;
    }

    /**
     * Should a refresh be executed post this index operation causing the operation to
     * be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
     * to <tt>false</tt>.
     */
    public DeleteRequest refresh(boolean refresh) {
        this.refresh = refresh;
        return this;
    }

    public boolean refresh() {
        return this.refresh;
    }

    /**
     * Sets the version, which will cause the delete operation to only be performed if a matching
     * version exists and no changes happened on the doc since then.

@@ -208,7 +193,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        id = in.readString();
        routing = in.readOptionalString();
        parent = in.readOptionalString();
        refresh = in.readBoolean();
        version = in.readLong();
        versionType = VersionType.fromValue(in.readByte());
    }

@@ -220,7 +204,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        out.writeString(id);
        out.writeOptionalString(routing());
        out.writeOptionalString(parent());
        out.writeBoolean(refresh);
        out.writeLong(version);
        out.writeByte(versionType.getValue());
    }
@@ -19,6 +19,7 @@

package org.elasticsearch.action.delete;

import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;

@@ -27,7 +28,8 @@ import org.elasticsearch.index.VersionType;
/**
 * A delete document action request builder.
 */
public class DeleteRequestBuilder extends ReplicationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {
public class DeleteRequestBuilder extends ReplicationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder>
        implements WriteRequestBuilder<DeleteRequestBuilder> {

    public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) {
        super(client, action, new DeleteRequest());

@@ -71,16 +73,6 @@ public class DeleteRequestBuilder extends ReplicationRequestBuilder<DeleteReques
        return this;
    }

    /**
     * Should a refresh be executed post this index operation causing the operation to
     * be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
     * to <tt>false</tt>.
     */
    public DeleteRequestBuilder setRefresh(boolean refresh) {
        request.refresh(refresh);
        return this;
    }

    /**
     * Sets the version, which will cause the delete operation to only be performed if a matching
     * version exists and no changes happened on the doc since then.
@@ -27,19 +27,19 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.tasks.Task;

@@ -49,7 +49,7 @@ import org.elasticsearch.transport.TransportService;
/**
 * Performs the delete operation.
 */
public class TransportDeleteAction extends TransportReplicationAction<DeleteRequest, DeleteRequest, DeleteResponse> {
public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, DeleteResponse> {

    private final AutoCreateIndex autoCreateIndex;
    private final TransportCreateIndexAction createIndexAction;

@@ -60,9 +60,8 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
            TransportCreateIndexAction createIndexAction, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver,
            AutoCreateIndex autoCreateIndex) {
        super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
                actionFilters, indexNameExpressionResolver,
                DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX);
        super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
                indexNameExpressionResolver, DeleteRequest::new, ThreadPool.Names.INDEX);
        this.createIndexAction = createIndexAction;
        this.autoCreateIndex = autoCreateIndex;
    }

@@ -119,11 +118,13 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
    }

    @Override
    protected Tuple<DeleteResponse, DeleteRequest> shardOperationOnPrimary(DeleteRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
        final WriteResult<DeleteResponse> result = executeDeleteRequestOnPrimary(request, indexShard);
        processAfterWrite(request.refresh(), indexShard, result.location);
        return new Tuple<>(result.response, request);
    protected WriteResult<DeleteResponse> onPrimaryShard(DeleteRequest request, IndexShard indexShard) {
        return executeDeleteRequestOnPrimary(request, indexShard);
    }

    @Override
    protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) {
        return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation();
    }

    public static WriteResult<DeleteResponse> executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) {

@@ -134,9 +135,8 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
        request.version(delete.version());

        assert request.versionType().validateVersionForWrites(request.version());
        return new WriteResult<>(
                new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()),
                delete.getTranslogLocation());
        DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found());
        return new WriteResult<>(response, delete.getTranslogLocation());
    }

    public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) {

@@ -144,13 +144,4 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
        indexShard.delete(delete);
        return delete;
    }

    @Override
    protected void shardOperationOnReplica(DeleteRequest request) {
        final ShardId shardId = request.shardId();
        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
        Engine.Delete delete = executeDeleteRequestOnReplica(request, indexShard);
        processAfterWrite(request.refresh(), indexShard, delete.getTranslogLocation());
    }

}
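With DeleteRequest now extending ReplicatedWriteRequest, a caller that previously used setRefresh(true) would switch to the inherited policy setter. A hedged usage sketch, where "index"/"type"/"1" are placeholders and `client` is an assumed Client instance:

```java
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;

DeleteResponse response = client.prepareDelete("index", "type", "1")
        .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) // don't respond until the delete is searchable
        .get();
```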
@@ -26,7 +26,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;

@@ -65,7 +64,6 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe

    private final ScriptService scriptService;

    private final PageCacheRecycler pageCacheRecycler;

    private final BigArrays bigArrays;

@@ -74,13 +72,12 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
    @Inject
    public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
            TransportService transportService, IndicesService indicesService, ScriptService scriptService,
            PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
            BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
            FetchPhase fetchPhase) {
        super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                ExplainRequest::new, ThreadPool.Names.GET);
        this.indicesService = indicesService;
        this.scriptService = scriptService;
        this.pageCacheRecycler = pageCacheRecycler;
        this.bigArrays = bigArrays;
        this.fetchPhase = fetchPhase;
    }

@@ -117,7 +114,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe

        SearchContext context = new DefaultSearchContext(0,
                new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
                result.searcher(), indexService, indexShard, scriptService, pageCacheRecycler, bigArrays,
                result.searcher(), indexService, indexShard, scriptService, bigArrays,
                threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
        SearchContext.setCurrent(context);

@@ -155,7 +152,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
    @Override
    protected ShardIterator shards(ClusterState state, InternalRequest request) {
        return clusterService.operationRouting().getShards(
                clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing(), request.request().preference()
                clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()
        );
    }
}
@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;

@@ -45,27 +44,23 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Set;
import java.util.HashSet;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;

public class TransportFieldStatsTransportAction extends
public class TransportFieldStatsAction extends
        TransportBroadcastAction<FieldStatsRequest, FieldStatsResponse, FieldStatsShardRequest, FieldStatsShardResponse> {

    private final IndicesService indicesService;

    @Inject
    public TransportFieldStatsTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
    public TransportFieldStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
            TransportService transportService, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver,
            IndicesService indicesService) {

@@ -195,26 +190,18 @@ public class TransportFieldStatsTransportAction extends
        MapperService mapperService = indexServices.mapperService();
        IndexShard shard = indexServices.getShard(shardId.id());
        try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
            // Resolve patterns and deduplicate
            Set<String> fieldNames = new HashSet<>();
            for (String field : request.getFields()) {
                Collection<String> matchFields;
                if (Regex.isSimpleMatchPattern(field)) {
                    matchFields = mapperService.simpleMatchToIndexNames(field);
                } else {
                    matchFields = Collections.singleton(field);
                }
                for (String matchField : matchFields) {
                    MappedFieldType fieldType = mapperService.fullName(matchField);
                    if (fieldType == null) {
                        // ignore.
                        continue;
                    }
                    FieldStats<?> stats = fieldType.stats(searcher.reader());
                    if (stats != null) {
                        fieldStats.put(matchField, stats);
                    }
                fieldNames.addAll(mapperService.simpleMatchToIndexNames(field));
            }
            for (String field : fieldNames) {
                FieldStats<?> stats = indicesService.getFieldStats(shard, searcher, field);
                if (stats != null) {
                    fieldStats.put(field, stats);
                }
            }
        } catch (IOException e) {
        } catch (Exception e) {
            throw ExceptionsHelper.convertToElastic(e);
        }
        return new FieldStatsShardResponse(shardId, fieldStats);
@@ -62,7 +62,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
    @Override
    protected ShardIterator shards(ClusterState state, InternalRequest request) {
        return clusterService.operationRouting()
                .getShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing(), request.request().preference());
                .getShards(clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference());
    }

    @Override
@@ -76,7 +76,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
                continue;
            }
            ShardId shardId = clusterService.operationRouting()
                    .getShards(clusterState, concreteSingleIndex, item.type(), item.id(), item.routing(), null).shardId();
                    .getShards(clusterState, concreteSingleIndex, item.id(), item.routing(), null).shardId();
            MultiGetShardRequest shardRequest = shardRequests.get(shardId);
            if (shardRequest == null) {
                shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.id());
@@ -24,7 +24,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.TimestampParsingException;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;

@@ -67,7 +67,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * @see org.elasticsearch.client.Requests#indexRequest(String)
 * @see org.elasticsearch.client.Client#index(IndexRequest)
 */
public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest> {
public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocumentRequest<IndexRequest> {

    /**
     * Operation type controls if the type of the index operation.

@@ -145,7 +145,6 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do

    private OpType opType = OpType.INDEX;

    private boolean refresh = false;
    private long version = Versions.MATCH_ANY;
    private VersionType versionType = VersionType.INTERNAL;

@@ -542,20 +541,6 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        return this.opType;
    }

    /**
     * Should a refresh be executed post this index operation causing the operation to
     * be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
     * to <tt>false</tt>.
     */
    public IndexRequest refresh(boolean refresh) {
        this.refresh = refresh;
        return this;
    }

    public boolean refresh() {
        return this.refresh;
    }

    /**
     * Sets the version, which will cause the index operation to only be performed if a matching
     * version exists and no changes happened on the doc since then.

@@ -648,11 +633,9 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        routing = in.readOptionalString();
        parent = in.readOptionalString();
        timestamp = in.readOptionalString();
        ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null;
        ttl = in.readOptionalWriteable(TimeValue::new);
        source = in.readBytesReference();

        opType = OpType.fromId(in.readByte());
        refresh = in.readBoolean();
        version = in.readLong();
        versionType = VersionType.fromValue(in.readByte());
        pipeline = in.readOptionalString();

@@ -666,15 +649,9 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
        out.writeOptionalString(routing);
        out.writeOptionalString(parent);
        out.writeOptionalString(timestamp);
        if (ttl == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            ttl.writeTo(out);
        }
        out.writeOptionalWriteable(ttl);
        out.writeBytesReference(source);
        out.writeByte(opType.id());
        out.writeBoolean(refresh);
        out.writeLong(version);
        out.writeByte(versionType.getValue());
        out.writeOptionalString(pipeline);
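The ttl hunks above also swap the hand-rolled boolean-prefix serialization for the stream helpers, which write a presence flag followed by the value. A minimal sketch of the pattern for any Writeable field (TimeValue is the field in this diff; the `in`/`out` stream variables are assumed to be in scope):

```java
// Writing: one presence boolean, then the value itself when non-null.
out.writeOptionalWriteable(ttl);

// Reading: the functional reader (here TimeValue's StreamInput constructor)
// is only invoked when the presence flag was true; otherwise null is returned.
TimeValue ttl = in.readOptionalWriteable(TimeValue::new);
```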
@@ -19,6 +19,7 @@

package org.elasticsearch.action.index;

import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;

@@ -33,7 +34,8 @@ import java.util.Map;
/**
 * An index document action request builder.
 */
public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest, IndexResponse, IndexRequestBuilder> {
public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest, IndexResponse, IndexRequestBuilder>
        implements WriteRequestBuilder<IndexRequestBuilder> {

    public IndexRequestBuilder(ElasticsearchClient client, IndexAction action) {
        super(client, action, new IndexRequest());

@@ -220,16 +222,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
        return this;
    }

    /**
     * Should a refresh be executed post this index operation causing the operation to
     * be searchable. Note, heavy indexing should not set this to <tt>true</tt>. Defaults
     * to <tt>false</tt>.
     */
    public IndexRequestBuilder setRefresh(boolean refresh) {
        request.refresh(refresh);
        return this;
    }

    /**
     * Sets the version, which will cause the index operation to only be performed if a matching
     * version exists and no changes happened on the doc since then.
@@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;

@@ -36,16 +36,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.tasks.Task;

@@ -62,7 +60,7 @@ import org.elasticsearch.transport.TransportService;
 * <li><b>allowIdGeneration</b>: If the id is set not, should it be generated. Defaults to <tt>true</tt>.
 * </ul>
 */
public class TransportIndexAction extends TransportReplicationAction<IndexRequest, IndexRequest, IndexResponse> {
public class TransportIndexAction extends TransportWriteAction<IndexRequest, IndexResponse> {

    private final AutoCreateIndex autoCreateIndex;
    private final boolean allowIdGeneration;

@@ -78,7 +76,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
            ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
            AutoCreateIndex autoCreateIndex) {
        super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
                actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
                actionFilters, indexNameExpressionResolver, IndexRequest::new, ThreadPool.Names.INDEX);
        this.mappingUpdatedAction = mappingUpdatedAction;
        this.createIndexAction = createIndexAction;
        this.autoCreateIndex = autoCreateIndex;

@@ -141,26 +139,13 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
    }

    @Override
    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(IndexRequest request) throws Exception {

        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().id());

        final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction);

        final IndexResponse response = result.response;
        final Translog.Location location = result.location;
        processAfterWrite(request.refresh(), indexShard, location);
        return new Tuple<>(response, request);
    protected WriteResult<IndexResponse> onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception {
        return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction);
    }

    @Override
    protected void shardOperationOnReplica(IndexRequest request) {
        final ShardId shardId = request.shardId();
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard indexShard = indexService.getShard(shardId.id());
        final Engine.Index operation = executeIndexRequestOnReplica(request, indexShard);
        processAfterWrite(request.refresh(), indexShard, operation.getTranslogLocation());
    protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) {
        return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation();
    }

    /**

@@ -188,11 +173,8 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
        return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType());
    }

    /**
     * Execute the given {@link IndexRequest} on a primary shard, throwing a
     * {@link ReplicationOperation.RetryOnPrimaryException} if the operation needs to be re-tried.
     */
    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
    public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard,
            MappingUpdatedAction mappingUpdatedAction) throws Exception {
        Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        final ShardId shardId = indexShard.shardId();

@@ -214,8 +196,8 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques

        assert request.versionType().validateVersionForWrites(request.version());

        return new WriteResult<>(new IndexResponse(shardId, request.type(), request.id(), request.version(), created), operation.getTranslogLocation());
        IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), created);
        return new WriteResult<>(response, operation.getTranslogLocation());
    }

}
@@ -162,7 +162,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
            return bulkRequest;
        } else {
            BulkRequest modifiedBulkRequest = new BulkRequest();
            modifiedBulkRequest.refresh(bulkRequest.refresh());
            modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy());
            modifiedBulkRequest.consistencyLevel(bulkRequest.consistencyLevel());
            modifiedBulkRequest.timeout(bulkRequest.timeout());
@@ -29,7 +29,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.List;

import static org.elasticsearch.ingest.processor.TrackingResultProcessor.decorate;
import static org.elasticsearch.action.ingest.TrackingResultProcessor.decorate;

class SimulateExecutionService {
@@ -17,9 +17,8 @@
 * under the License.
 */

package org.elasticsearch.ingest.processor;
package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ingest.SimulateProcessorResult;
import org.elasticsearch.ingest.core.CompoundProcessor;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Processor;

@@ -83,7 +82,7 @@ public final class TrackingResultProcessor implements Processor {
                onFailureProcessors.add(new TrackingResultProcessor(processor, processorResultList));
            }
        }
        return new CompoundProcessor(processors, onFailureProcessors);
        return new CompoundProcessor(false, processors, onFailureProcessors);
    }
}
@@ -38,6 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 */
public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implements CompositeIndicesRequest {

    private int maxConcurrentSearchRequests = 0;
    private List<SearchRequest> requests = new ArrayList<>();

    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();

@@ -60,6 +61,25 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implem
        return this;
    }

    /**
     * Returns the amount of search requests specified in this multi search requests are allowed to be ran concurrently.
     */
    public int maxConcurrentSearchRequests() {
        return maxConcurrentSearchRequests;
    }

    /**
     * Sets how many search requests specified in this multi search requests are allowed to be ran concurrently.
     */
    public MultiSearchRequest maxConcurrentSearchRequests(int maxConcurrentSearchRequests) {
        if (maxConcurrentSearchRequests < 1) {
            throw new IllegalArgumentException("maxConcurrentSearchRequests must be positive");
        }

        this.maxConcurrentSearchRequests = maxConcurrentSearchRequests;
        return this;
    }

    public List<SearchRequest> requests() {
        return this.requests;
    }

@@ -100,6 +120,7 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implem
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        maxConcurrentSearchRequests = in.readVInt();
        int size = in.readVInt();
        for (int i = 0; i < size; i++) {
            SearchRequest request = new SearchRequest();

@@ -111,6 +132,7 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implem
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(maxConcurrentSearchRequests);
        out.writeVInt(requests.size());
        for (SearchRequest request : requests) {
            request.writeTo(out);
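The new maxConcurrentSearchRequests knob caps how many of the wrapped searches run at once (0 leaves the default in place). A hedged sketch of request-level usage; the index names are placeholders:

```java
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;

MultiSearchRequest msearch = new MultiSearchRequest();
msearch.add(new SearchRequest("logs-2016.05"));
msearch.add(new SearchRequest("logs-2016.06"));
msearch.maxConcurrentSearchRequests(2); // values < 1 throw IllegalArgumentException
```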
@@ -71,4 +71,12 @@ public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchR
        request().indicesOptions(indicesOptions);
        return this;
    }

    /**
     * Sets how many search requests specified in this multi search requests are allowed to be ran concurrently.
     */
    public MultiSearchRequestBuilder setMaxConcurrentSearchRequests(int maxConcurrentSearchRequests) {
        request().maxConcurrentSearchRequests(maxConcurrentSearchRequests);
        return this;
    }
}
@@ -29,7 +29,8 @@ import org.elasticsearch.script.Script;
 import org.elasticsearch.script.Template;
 import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
+import org.elasticsearch.search.aggregations.PipelineAggregatorBuilder;
+import org.elasticsearch.search.slice.SliceBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.highlight.HighlightBuilder;
 import org.elasticsearch.search.rescore.RescoreBuilder;

@@ -352,6 +353,11 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
         return this;
     }

+    public SearchRequestBuilder slice(SliceBuilder builder) {
+        sourceBuilder().slice(builder);
+        return this;
+    }
+
     /**
      * Applies when sorting, and controls if scores will be tracked as well. Defaults to
      * <tt>false</tt>.

@@ -373,7 +379,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
     /**
      * Adds an aggregation to the search operation.
      */
-    public SearchRequestBuilder addAggregation(AggregationBuilder<?> aggregation) {
+    public SearchRequestBuilder addAggregation(AggregationBuilder aggregation) {
        sourceBuilder().aggregation(aggregation);
         return this;
     }
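A hedged sketch of the new slice(...) hook, e.g. for splitting a scrolled search into independent slices; the index name, keep-alive, and slice parameters are assumptions:

    // Slice 0 of 2; a second, parallel client would request slice id 1.
    SearchResponse firstSlice = client.prepareSearch("big-index")
            .setScroll("1m")
            .slice(new SliceBuilder(0, 2)) // id = 0, max = 2
            .get();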
@@ -36,7 +36,6 @@ import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;

 import java.io.IOException;
-import java.util.List;
 import java.util.Map;

 import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;

@@ -167,9 +166,9 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
      * If profiling was enabled, this returns an object containing the profile results from
      * each shard. If profiling was not enabled, this will return null
      *
-     * @return The profile results or null
+     * @return The profile results or an empty map
      */
-    public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
+    public @Nullable Map<String, ProfileShardResult> getProfileResults() {
         return internalResponse.profile();
     }
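With this change callers receive a single ProfileShardResult per shard key instead of a list. A minimal sketch of reading the results (index and query assumed):

    SearchResponse response = client.prepareSearch("logs").setProfile(true).get();
    Map<String, ProfileShardResult> profiles = response.getProfileResults();
    for (Map.Entry<String, ProfileShardResult> entry : profiles.entrySet()) {
        System.out.println(entry.getKey()); // composite shard key
    }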
@@ -22,6 +22,7 @@ package org.elasticsearch.action.search;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.TransportAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

@@ -29,57 +30,118 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicInteger;

 /**
  */
 public class TransportMultiSearchAction extends HandledTransportAction<MultiSearchRequest, MultiSearchResponse> {

+    private final int availableProcessors;
     private final ClusterService clusterService;
-    private final TransportSearchAction searchAction;
+    private final TransportAction<SearchRequest, SearchResponse> searchAction;

     @Inject
     public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService,
-            ClusterService clusterService, TransportSearchAction searchAction,
-            ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                      ClusterService clusterService, TransportSearchAction searchAction,
+                                      ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new);
         this.clusterService = clusterService;
         this.searchAction = searchAction;
+        this.availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
     }

+    // For testing only:
+    TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService,
+                               ClusterService clusterService, TransportAction<SearchRequest, SearchResponse> searchAction,
+                               IndexNameExpressionResolver indexNameExpressionResolver, int availableProcessors) {
+        super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new);
+        this.clusterService = clusterService;
+        this.searchAction = searchAction;
+        this.availableProcessors = availableProcessors;
+    }
+
     @Override
-    protected void doExecute(final MultiSearchRequest request, final ActionListener<MultiSearchResponse> listener) {
+    protected void doExecute(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
         ClusterState clusterState = clusterService.state();
         clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

-        final AtomicArray<MultiSearchResponse.Item> responses = new AtomicArray<>(request.requests().size());
-        final AtomicInteger counter = new AtomicInteger(responses.length());
-        for (int i = 0; i < responses.length(); i++) {
-            final int index = i;
-            searchAction.execute(request.requests().get(i), new ActionListener<SearchResponse>() {
-                @Override
-                public void onResponse(SearchResponse searchResponse) {
-                    responses.set(index, new MultiSearchResponse.Item(searchResponse, null));
-                    if (counter.decrementAndGet() == 0) {
-                        finishHim();
-                    }
-                }
+        int maxConcurrentSearches = request.maxConcurrentSearchRequests();
+        if (maxConcurrentSearches == 0) {
+            maxConcurrentSearches = defaultMaxConcurrentSearches(availableProcessors, clusterState);
+        }

-                @Override
-                public void onFailure(Throwable e) {
-                    responses.set(index, new MultiSearchResponse.Item(null, e));
-                    if (counter.decrementAndGet() == 0) {
-                        finishHim();
-                    }
-                }
+        Queue<SearchRequestSlot> searchRequestSlots = new ConcurrentLinkedQueue<>();
+        for (int i = 0; i < request.requests().size(); i++) {
+            SearchRequest searchRequest = request.requests().get(i);
+            searchRequestSlots.add(new SearchRequestSlot(searchRequest, i));
+        }

-                private void finishHim() {
+        int numRequests = request.requests().size();
+        final AtomicArray<MultiSearchResponse.Item> responses = new AtomicArray<>(numRequests);
+        final AtomicInteger responseCounter = new AtomicInteger(numRequests);
+        int numConcurrentSearches = Math.min(numRequests, maxConcurrentSearches);
+        for (int i = 0; i < numConcurrentSearches; i++) {
+            executeSearch(searchRequestSlots, responses, responseCounter, listener);
+        }
+    }
+
+    /*
+     * This is not perfect and makes a big assumption: that all nodes have the same thread pool size / number of processors
+     * and that the shards of the indices the search requests go to are more or less evenly distributed across all nodes in
+     * the cluster. But it is a good enough default for most cases; if not, the default should be overridden in the request
+     * itself.
+     */
+    static int defaultMaxConcurrentSearches(int availableProcessors, ClusterState state) {
+        int numDateNodes = state.getNodes().getDataNodes().size();
+        // availableProcessors will never be larger than 32, so max defaultMaxConcurrentSearches will never be larger than 49,
+        // but we don't know about other search requests that are being executed, so let's cap at 10 per node
+        int defaultSearchThreadPoolSize = Math.min(ThreadPool.searchThreadPoolSize(availableProcessors), 10);
+        return Math.max(1, numDateNodes * defaultSearchThreadPoolSize);
+    }
+
+    void executeSearch(Queue<SearchRequestSlot> requests, AtomicArray<MultiSearchResponse.Item> responses,
+                       AtomicInteger responseCounter, ActionListener<MultiSearchResponse> listener) {
+        SearchRequestSlot request = requests.poll();
+        if (request == null) {
+            // No more requests in the queue; we're just waiting for the in-flight requests to complete.
+            return;
+        }
+        searchAction.execute(request.request, new ActionListener<SearchResponse>() {
+            @Override
+            public void onResponse(SearchResponse searchResponse) {
+                responses.set(request.responseSlot, new MultiSearchResponse.Item(searchResponse, null));
+                handleResponse();
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                responses.set(request.responseSlot, new MultiSearchResponse.Item(null, e));
+                handleResponse();
+            }
+
+            private void handleResponse() {
+                if (responseCounter.decrementAndGet() == 0) {
                     listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()])));
+                } else {
+                    executeSearch(requests, responses, responseCounter, listener);
                 }
-            });
-        }
+            }
+        });
     }
+
+    final static class SearchRequestSlot {
+
+        final SearchRequest request;
+        final int responseSlot;
+
+        SearchRequestSlot(SearchRequest request, int responseSlot) {
+            this.request = request;
+            this.responseSlot = responseSlot;
+        }
+    }
 }
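The scheduling idea above (seed a queue, start at most N searches, and let each completion pull the next slot) generalizes beyond search. A minimal, self-contained sketch of the same bounded-concurrency pattern under assumed generic names, not the Elasticsearch implementation:

    import java.util.List;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.BiConsumer;

    final class BoundedConcurrency {
        /** Runs all tasks with at most maxConcurrent in flight; asyncTask must invoke the callback exactly once. */
        static <T> void run(List<T> tasks, int maxConcurrent, BiConsumer<T, Runnable> asyncTask, Runnable onAllDone) {
            Queue<T> queue = new ConcurrentLinkedQueue<>(tasks);
            AtomicInteger remaining = new AtomicInteger(tasks.size());
            int starters = Math.min(tasks.size(), maxConcurrent);
            for (int i = 0; i < starters; i++) {
                next(queue, remaining, asyncTask, onAllDone);
            }
        }

        private static <T> void next(Queue<T> queue, AtomicInteger remaining,
                                     BiConsumer<T, Runnable> asyncTask, Runnable onAllDone) {
            T task = queue.poll();
            if (task == null) {
                return; // nothing left to start; in-flight tasks finish on their own
            }
            asyncTask.accept(task, () -> {
                if (remaining.decrementAndGet() == 0) {
                    onAllDone.run();                              // every task has completed
                } else {
                    next(queue, remaining, asyncTask, onAllDone); // pull the next slot
                }
            });
        }
    }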
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+
+/**
+ * Interface implemented by requests that modify the documents in an index like {@link IndexRequest}, {@link UpdateRequest}, and
+ * {@link BulkRequest}. Rather than implement this directly most implementers should extend {@link ReplicatedWriteRequest}.
+ */
+public interface WriteRequest<R extends WriteRequest<R>> extends Streamable {
+    /**
+     * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh (
+     * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed and ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default)?
+     */
+    R setRefreshPolicy(RefreshPolicy refreshPolicy);
+
+    /**
+     * Parse the refresh policy from a string, only modifying it if the string is non-null. Convenient to use with request parsing.
+     */
+    @SuppressWarnings("unchecked")
+    default R setRefreshPolicy(String refreshPolicy) {
+        if (refreshPolicy != null) {
+            setRefreshPolicy(RefreshPolicy.parse(refreshPolicy));
+        }
+        return (R) this;
+    }
+
+    /**
+     * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh (
+     * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed and ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default)?
+     */
+    RefreshPolicy getRefreshPolicy();
+
+    ActionRequestValidationException validate();
+
+    enum RefreshPolicy implements Writeable {
+        /**
+         * Don't refresh after this request. The default.
+         */
+        NONE,
+        /**
+         * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is
+         * useful to present a consistent view for indices with very low traffic. And it is wonderful for tests!
+         */
+        IMMEDIATE,
+        /**
+         * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is
+         * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs.
+         */
+        WAIT_UNTIL;
+
+        /**
+         * Parse the string representation of a refresh policy, usually from a request parameter.
+         */
+        public static RefreshPolicy parse(String string) {
+            switch (string) {
+            case "false":
+                return NONE;
+            /*
+             * Empty string is IMMEDIATE because that makes "POST /test/test/1?refresh" perform a refresh, which reads well and is
+             * what folks are used to.
+             */
+            case "":
+            case "true":
+                return IMMEDIATE;
+            case "wait_for":
+                return WAIT_UNTIL;
+            }
+            throw new IllegalArgumentException("Unknown value for refresh: [" + string + "].");
+        }
+
+        public static RefreshPolicy readFrom(StreamInput in) throws IOException {
+            return RefreshPolicy.values()[in.readByte()];
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeByte((byte) ordinal());
+        }
+    }
+}
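The parse rules above map the familiar ?refresh request-parameter values as follows (illustrative calls only):

    WriteRequest.RefreshPolicy.parse("");         // IMMEDIATE: a bare "?refresh"
    WriteRequest.RefreshPolicy.parse("true");     // IMMEDIATE
    WriteRequest.RefreshPolicy.parse("false");    // NONE
    WriteRequest.RefreshPolicy.parse("wait_for"); // WAIT_UNTIL
    WriteRequest.RefreshPolicy.parse("nope");     // throws IllegalArgumentException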
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
+
+public interface WriteRequestBuilder<B extends WriteRequestBuilder<B>> {
+    WriteRequest<?> request();
+
+    /**
+     * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh (
+     * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed and ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default)?
+     */
+    @SuppressWarnings("unchecked")
+    default B setRefreshPolicy(RefreshPolicy refreshPolicy) {
+        request().setRefreshPolicy(refreshPolicy);
+        return (B) this;
+    }
+
+    /**
+     * Parse the refresh policy from a string, only modifying it if the string is non-null. Convenient to use with request parsing.
+     */
+    @SuppressWarnings("unchecked")
+    default B setRefreshPolicy(String refreshPolicy) {
+        request().setRefreshPolicy(refreshPolicy);
+        return (B) this;
+    }
+}
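Builders that adopt this interface pick up the refresh knob fluently. A hedged usage sketch, assuming IndexRequestBuilder implements WriteRequestBuilder (wired up elsewhere in this diff) and an illustrative index/type/id:

    IndexResponse response = client.prepareIndex("test", "test", "1")
            .setSource("field", "value")
            .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) // don't reply until the doc is visible to search
            .get();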
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.index.IndexSettings;
+
+/**
+ * Interface implemented by responses for actions that modify the documents in an index like {@link IndexResponse}, {@link UpdateResponse},
+ * and {@link BulkResponse}. Rather than implement this directly most implementers should extend {@link DocWriteResponse}.
+ */
+public interface WriteResponse {
+    /**
+     * Mark the response as having forced a refresh. Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
+     * {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only
+     * set this to true if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
+     */
+    public abstract void setForcedRefresh(boolean forcedRefresh);
+}
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.action.support.master;

-import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.cluster.ack.AckedRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -26,7 +25,6 @@ import org.elasticsearch.common.unit.TimeValue;

 import java.io.IOException;

-import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
 import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

 /**

@@ -76,7 +74,7 @@ public abstract class AcknowledgedRequest<Request extends MasterNodeRequest<Requ
      * Reads the timeout value
      */
     protected void readTimeout(StreamInput in) throws IOException {
-        timeout = readTimeValue(in);
+        timeout = new TimeValue(in);
     }

     /**
@@ -61,7 +61,7 @@ public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Reques
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        masterNodeTimeout = TimeValue.readTimeValue(in);
+        masterNodeTimeout = new TimeValue(in);
     }

     @Override
@@ -82,20 +82,13 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         nodesIds = in.readStringArray();
-        if (in.readBoolean()) {
-            timeout = TimeValue.readTimeValue(in);
-        }
+        timeout = in.readOptionalWriteable(TimeValue::new);
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeStringArrayNullable(nodesIds);
-        if (timeout == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            timeout.writeTo(out);
-        }
+        out.writeOptionalWriteable(timeout);
     }
 }
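The readOptionalWriteable/writeOptionalWriteable pair works for any class with a StreamInput "reader" constructor, which TimeValue gains in this change: the presence flag is handled by the helpers on both ends. A minimal sketch of the pattern on a hypothetical Writeable:

    // Hypothetical class following the same Writeable pattern as TimeValue.
    class Interval implements Writeable {
        final long millis;

        Interval(long millis) {
            this.millis = millis;
        }

        Interval(StreamInput in) throws IOException { // the "reader" constructor
            this.millis = in.readVLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(millis);
        }
    }

    // Usage mirrors the diff above:
    //   out.writeOptionalWriteable(interval);                 // writes false, or true + the value
    //   Interval i = in.readOptionalWriteable(Interval::new); // null when the flag was false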
@@ -176,12 +176,10 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
         this.request = request;
         this.listener = listener;
         ClusterState clusterState = clusterService.state();
-        String[] nodesIds = resolveNodes(request, clusterState);
-        this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
-        ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().getNodes();
+        nodesIds = filterNodeIds(clusterState.nodes(), resolveNodes(request, clusterState));
         this.nodes = new DiscoveryNode[nodesIds.length];
         for (int i = 0; i < nodesIds.length; i++) {
-            this.nodes[i] = nodes.get(nodesIds[i]);
+            this.nodes[i] = clusterState.nodes().get(nodesIds[i]);
         }
         this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
     }
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.replication;
+
+import org.elasticsearch.action.bulk.BulkShardRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+
+/**
+ * Requests that are both {@linkplain ReplicationRequest}s (run on a shard's primary first, then the replica) and {@linkplain WriteRequest}
+ * (modify documents on a shard), for example {@link BulkShardRequest}, {@link IndexRequest}, and {@link DeleteRequest}.
+ */
+public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>> extends ReplicationRequest<R> implements WriteRequest<R> {
+    private RefreshPolicy refreshPolicy = RefreshPolicy.NONE;
+
+    /**
+     * Constructor for deserialization.
+     */
+    public ReplicatedWriteRequest() {
+    }
+
+    public ReplicatedWriteRequest(ShardId shardId) {
+        super(shardId);
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public R setRefreshPolicy(RefreshPolicy refreshPolicy) {
+        this.refreshPolicy = refreshPolicy;
+        return (R) this;
+    }
+
+    @Override
+    public RefreshPolicy getRefreshPolicy() {
+        return refreshPolicy;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        refreshPolicy = RefreshPolicy.readFrom(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        refreshPolicy.writeTo(out);
+    }
+}
@@ -21,7 +21,6 @@ package org.elasticsearch.action.support.replication;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.WriteConsistencyLevel;
 import org.elasticsearch.action.support.TransportActions;

@@ -29,7 +28,6 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.index.engine.VersionConflictEngineException;

@@ -47,28 +45,41 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import java.util.function.Supplier;

-public class ReplicationOperation<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
-    Response extends ReplicationResponse> {
+public class ReplicationOperation<
+            Request extends ReplicationRequest<Request>,
+            ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
+            PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
+        > {
     final private ESLogger logger;
     final private Request request;
     final private Supplier<ClusterState> clusterStateSupplier;
     final private String opType;
     final private AtomicInteger totalShards = new AtomicInteger();
+    /**
+     * The number of pending sub-operations in this operation. This is incremented when the following operations start and decremented
+     * when they complete:
+     * <ul>
+     * <li>The operation on the primary</li>
+     * <li>The operation on each replica</li>
+     * <li>Coordination of the operation as a whole. This prevents the operation from terminating early if we haven't started any replica
+     * operations and the primary finishes.</li>
+     * </ul>
+     */
     final private AtomicInteger pendingShards = new AtomicInteger();
     final private AtomicInteger successfulShards = new AtomicInteger();
     final private boolean executeOnReplicas;
     final private boolean checkWriteConsistency;
-    final private Primary<Request, ReplicaRequest, Response> primary;
+    final private Primary<Request, ReplicaRequest, PrimaryResultT> primary;
     final private Replicas<ReplicaRequest> replicasProxy;
     final private AtomicBoolean finished = new AtomicBoolean();
-    final protected ActionListener<Response> finalResponseListener;
+    final protected ActionListener<PrimaryResultT> resultListener;

-    private volatile Response finalResponse = null;
+    private volatile PrimaryResultT primaryResult = null;

     private final List<ReplicationResponse.ShardInfo.Failure> shardReplicaFailures = Collections.synchronizedList(new ArrayList<>());

-    ReplicationOperation(Request request, Primary<Request, ReplicaRequest, Response> primary,
-                         ActionListener<Response> listener,
+    ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
+                         ActionListener<PrimaryResultT> listener,
                          boolean executeOnReplicas, boolean checkWriteConsistency,
                          Replicas<ReplicaRequest> replicas,
                          Supplier<ClusterState> clusterStateSupplier, ESLogger logger, String opType) {

@@ -76,7 +87,7 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R
         this.executeOnReplicas = executeOnReplicas;
         this.replicasProxy = replicas;
         this.primary = primary;
-        this.finalResponseListener = listener;
+        this.resultListener = listener;
         this.logger = logger;
         this.request = request;
         this.clusterStateSupplier = clusterStateSupplier;

@@ -85,28 +96,27 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R

     void execute() throws Exception {
         final String writeConsistencyFailure = checkWriteConsistency ? checkWriteConsistency() : null;
-        final ShardId shardId = primary.routingEntry().shardId();
+        final ShardRouting primaryRouting = primary.routingEntry();
+        final ShardId primaryId = primaryRouting.shardId();
         if (writeConsistencyFailure != null) {
-            finishAsFailed(new UnavailableShardsException(shardId,
+            finishAsFailed(new UnavailableShardsException(primaryId,
                 "{} Timeout: [{}], request: [{}]", writeConsistencyFailure, request.timeout(), request));
             return;
         }

         totalShards.incrementAndGet();
-        pendingShards.incrementAndGet(); // increase by 1 until we finish all primary coordination
-        Tuple<Response, ReplicaRequest> primaryResponse = primary.perform(request);
-        successfulShards.incrementAndGet(); // mark primary as successful
-        finalResponse = primaryResponse.v1();
-        ReplicaRequest replicaRequest = primaryResponse.v2();
+        pendingShards.incrementAndGet();
+        primaryResult = primary.perform(request);
+        final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
+        assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
         if (logger.isTraceEnabled()) {
-            logger.trace("[{}] op [{}] completed on primary for request [{}]", shardId, opType, request);
+            logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
         }
         // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
         // we have to make sure that every operation indexed into the primary after recovery start will also be replicated
         // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
         // If the index gets deleted after primary operation, we skip replication
-        List<ShardRouting> shards = getShards(shardId, clusterStateSupplier.get());
+        final List<ShardRouting> shards = getShards(primaryId, clusterStateSupplier.get());
         final String localNodeId = primary.routingEntry().currentNodeId();
         for (final ShardRouting shard : shards) {
             if (executeOnReplicas == false || shard.unassigned()) {

@@ -125,8 +135,8 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R
             }
         }

-        // decrement pending and finish (if there are no replicas, or those are done)
-        decPendingAndFinishIfNeeded(); // incremented in the beginning of this method
+        successfulShards.incrementAndGet();
+        decPendingAndFinishIfNeeded();
     }

     private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) {

@@ -241,19 +251,19 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R
                 failuresArray = new ReplicationResponse.ShardInfo.Failure[shardReplicaFailures.size()];
                 shardReplicaFailures.toArray(failuresArray);
             }
-            finalResponse.setShardInfo(new ReplicationResponse.ShardInfo(
+            primaryResult.setShardInfo(new ReplicationResponse.ShardInfo(
                     totalShards.get(),
                     successfulShards.get(),
                     failuresArray
                 )
             );
-            finalResponseListener.onResponse(finalResponse);
+            resultListener.onResponse(primaryResult);
         }
     }

     private void finishAsFailed(Throwable throwable) {
         if (finished.compareAndSet(false, true)) {
-            finalResponseListener.onFailure(throwable);
+            resultListener.onFailure(throwable);
         }
     }

@@ -284,22 +294,31 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R
     }


-    interface Primary<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
-        Response extends ReplicationResponse> {
+    interface Primary<
+                Request extends ReplicationRequest<Request>,
+                ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
+                PrimaryResultT extends PrimaryResult<ReplicaRequest>
+            > {

-        /** routing entry for this primary */
+        /**
+         * routing entry for this primary
+         */
         ShardRouting routingEntry();

-        /** fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master */
+        /**
+         * fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master
+         */
         void failShard(String message, Throwable throwable);

         /**
-         * Performs the given request on this primary
+         * Performs the given request on this primary. Yes, this returns as soon as it can with the request for the replicas and calls a
+         * listener when the primary request is completed. Yes, the primary request might complete before the method returns. Yes, it
+         * might also complete after. Deal with it.
          *
-         * @return A tuple containing not null values, as first value the result of the primary operation and as second value
-         * the request to be executed on the replica shards.
+         * @param request the request to perform
+         * @return the request to send to the replicas
          */
-        Tuple<Response, ReplicaRequest> perform(Request request) throws Exception;
+        PrimaryResultT perform(Request request) throws Exception;

     }

@@ -308,19 +327,20 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R
         /**
          * performs the given request on the specified replica
          *
          * @param replica        {@link ShardRouting} of the shard this request should be executed on
         * @param replicaRequest operation to perform
          * @param listener       a callback to call once the operation has been completed, either successfully or with an error.
          */
         void performOn(ShardRouting replica, ReplicaRequest replicaRequest, ActionListener<TransportResponse.Empty> listener);

         /**
          * Fail the specified shard, removing it from the current set of active shards
+         *
          * @param replica          shard to fail
          * @param primary          the primary shard that requested the failure
          * @param message          a (short) description of the reason
          * @param throwable        the original exception which caused the ReplicationOperation to request the shard to be failed
          * @param onSuccess        a callback to call when the shard has been successfully removed from the active set.
          * @param onPrimaryDemoted a callback to call when the shard can not be failed because the current primary has been demoted
          *                         by the master.
          * @param onIgnoredFailure a callback to call when failing a shard has failed, but that failure can be safely ignored and the

@@ -345,4 +365,11 @@ public class ReplicationOperation<Request extends ReplicationRequest<Request>, R
             super(in);
         }
     }
+
+    interface PrimaryResult<R extends ReplicationRequest<R>> {
+
+        R replicaRequest();
+
+        void setShardInfo(ReplicationResponse.ShardInfo shardInfo);
+    }
 }
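The pendingShards javadoc above describes a small but subtle trick: an extra "coordination" increment keeps the operation open until the primary has finished fanning out, so a fast primary cannot complete the whole operation before any replica work has been registered. A minimal, generic sketch of that idea (assumed names, not the Elasticsearch implementation):

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Consumer;

    // Each async sub-operation is a Consumer<Runnable> that invokes the callback exactly once on completion.
    final class CoordinationSlot {
        static void run(Consumer<Runnable> primaryOp, List<Consumer<Runnable>> replicaOps, Runnable onAllDone) {
            AtomicInteger pending = new AtomicInteger();
            Runnable done = () -> {
                if (pending.decrementAndGet() == 0) {
                    onAllDone.run();
                }
            };
            pending.incrementAndGet();      // coordination slot: holds the operation open during fan-out
            pending.incrementAndGet();      // slot for the primary
            primaryOp.accept(done);
            for (Consumer<Runnable> replicaOp : replicaOps) {
                pending.incrementAndGet();  // one slot per replica
                replicaOp.accept(done);
            }
            done.run();                     // release the coordination slot
        }
    }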
@@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction;
+import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;

@@ -38,7 +40,8 @@ import java.util.concurrent.TimeUnit;
 import static org.elasticsearch.action.ValidateActions.addValidationError;

 /**
- *
+ * Requests that are run on a particular replica, first on the primary and then on the replicas like {@link IndexRequest} or
+ * {@link TransportShardRefreshAction}.
  */
 public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request>
     implements IndicesRequest {

@@ -65,7 +68,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ

     }

-
     /**
      * Creates a new request with resolved shard id
      */

@@ -179,7 +181,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
             shardId = null;
         }
         consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
-        timeout = TimeValue.readTimeValue(in);
+        timeout = new TimeValue(in);
         index = in.readString();
         routedBasedOnClusterVersion = in.readVLong();
         primaryTerm = in.readVLong();
@@ -17,10 +17,12 @@
  * under the License.
  */

-package org.elasticsearch.action;
+package org.elasticsearch.action.support.replication;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;

@@ -79,14 +81,16 @@ public class ReplicationResponse extends ActionResponse {
         }

         /**
-         * @return the total number of shards the write should go to (replicas and primaries). This includes relocating shards, so this number can be higher than the number of shards.
+         * @return the total number of shards the write should go to (replicas and primaries). This includes relocating shards, so this
+         * number can be higher than the number of shards.
          */
         public int getTotal() {
             return total;
         }

         /**
-         * @return the total number of shards the write succeeded on (replicas and primaries). This includes relocating shards, so this number can be higher than the number of shards.
+         * @return the total number of shards the write succeeded on (replicas and primaries). This includes relocating shards, so this
+         * number can be higher than the number of shards.
          */
         public int getSuccessful() {
             return successful;
@@ -19,6 +19,7 @@

 package org.elasticsearch.action.support.replication;

+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -88,5 +89,25 @@ public class ReplicationTask extends Task {
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(phase);
         }
+
+        @Override
+        public String toString() {
+            return Strings.toString(this);
+        }
+
+        // Implements equals and hashcode for testing
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null || obj.getClass() != ReplicationTask.Status.class) {
+                return false;
+            }
+            ReplicationTask.Status other = (Status) obj;
+            return phase.equals(other.phase);
+        }
+
+        @Override
+        public int hashCode() {
+            return phase.hashCode();
+        }
     }
 }