Merge branch 'master' into close-index-api-refactoring

commit 593d8637c4
@@ -51,6 +51,7 @@ import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.api.tasks.javadoc.Javadoc
import org.gradle.internal.jvm.Jvm
import org.gradle.process.ExecResult
import org.gradle.process.ExecSpec
import org.gradle.util.GradleVersion

import java.nio.charset.StandardCharsets

@@ -232,6 +233,95 @@ class BuildPlugin implements Plugin<Project> {
project.ext.java9Home = project.rootProject.ext.java9Home
}

static void requireDocker(final Task task) {
final Project rootProject = task.project.rootProject
if (rootProject.hasProperty('requiresDocker') == false) {
/*
* This is our first time encountering a task that requires Docker. We will add an extension that will let us track the tasks
* that register as requiring Docker. We will add a delayed execution that when the task graph is ready if any such tasks are
* in the task graph, then we check two things:
* - the Docker binary is available
* - we can execute a Docker command that requires privileges
*
* If either of these fail, we fail the build.
*/
final boolean buildDocker
final String buildDockerProperty = System.getProperty("build.docker")
if (buildDockerProperty == null || buildDockerProperty == "true") {
buildDocker = true
} else if (buildDockerProperty == "false") {
buildDocker = false
} else {
throw new IllegalArgumentException(
"expected build.docker to be unset or one of \"true\" or \"false\" but was [" + buildDockerProperty + "]")
}
rootProject.rootProject.ext.buildDocker = buildDocker
rootProject.rootProject.ext.requiresDocker = []
rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph ->
// check if the Docker binary exists and record its path
final List<String> maybeDockerBinaries = ['/usr/bin/docker', '/usr/local/bin/docker']
final String dockerBinary = maybeDockerBinaries.find { it -> new File(it).exists() }

int exitCode
String dockerErrorOutput
if (dockerBinary == null) {
exitCode = -1
dockerErrorOutput = null
} else {
// the Docker binary executes, check that we can execute a privileged command
final ByteArrayOutputStream output = new ByteArrayOutputStream()
final ExecResult result = LoggedExec.exec(rootProject, { ExecSpec it ->
it.commandLine dockerBinary, "images"
it.errorOutput = output
it.ignoreExitValue = true
})
if (result.exitValue == 0) {
return
}
exitCode = result.exitValue
dockerErrorOutput = output.toString()
}
final List<String> tasks =
((List<Task>)rootProject.requiresDocker).findAll { taskGraph.hasTask(it) }.collect { " ${it.path}".toString()}
if (tasks.isEmpty() == false) {
/*
* There are tasks in the task graph that require Docker. Now we are failing because either the Docker binary does not
* exist or because execution of a privileged Docker command failed.
*/
String message
if (dockerBinary == null) {
message = String.format(
Locale.ROOT,
"Docker (checked [%s]) is required to run the following task%s: \n%s",
maybeDockerBinaries.join(","),
tasks.size() > 1 ? "s" : "",
tasks.join('\n'))
} else {
assert exitCode > 0 && dockerErrorOutput != null
message = String.format(
Locale.ROOT,
"a problem occurred running Docker from [%s] yet it is required to run the following task%s: \n%s\n" +
"the problem is that Docker exited with exit code [%d] with standard error output [%s]",
dockerBinary,
tasks.size() > 1 ? "s" : "",
tasks.join('\n'),
exitCode,
dockerErrorOutput.trim())
}
throw new GradleException(
message + "\nyou can address this by attending to the reported issue, "
+ "removing the offending tasks from being executed, "
+ "or by passing -Dbuild.docker=false")
}
}
}
if (rootProject.buildDocker) {
rootProject.requiresDocker.add(task)
} else {
task.enabled = false
}
}

private static String findCompilerJavaHome() {
String compilerJavaHome = System.getenv('JAVA_HOME')
final String compilerJavaProperty = System.getProperty('compiler.java')

@@ -785,10 +875,6 @@ class BuildPlugin implements Plugin<Project> {
task.shouldRunAfter testTask
}
}
// no loose ends: check has to depend on all test tasks
project.tasks.matching {it.name == "check"}.all {
dependsOn(task)
}

// TODO: why are we not passing maxmemory to junit4?
jvmArg '-Xmx' + System.getProperty('tests.heap.size', '512m')

@@ -129,6 +129,7 @@ public class PluginBuildPlugin extends BuildPlugin {
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.mustRunAfter(project.precommit, project.test)
project.integTestCluster.distribution = System.getProperty('tests.distribution', 'integ-test-zip')
project.check.dependsOn(integTest)
}

/**

@@ -18,6 +18,7 @@
*/
package org.elasticsearch.gradle.test

import java.util.stream.Collectors
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BuildPlugin

@@ -92,7 +93,8 @@ class ClusterFormationTasks {
throw new Exception("tests.distribution=integ-test-zip is not supported")
}
configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch)
if (config.numBwcNodes > 0) {
boolean hasBwcNodes = config.numBwcNodes > 0
if (hasBwcNodes) {
if (config.bwcVersion == null) {
throw new IllegalArgumentException("Must specify bwcVersion when numBwcNodes > 0")
}

@@ -134,6 +136,16 @@ class ClusterFormationTasks {
esConfig['discovery.zen.hosts_provider'] = 'file'
}
esConfig['discovery.zen.ping.unicast.hosts'] = []
if (hasBwcNodes == false && esConfig['discovery.type'] == null) {
esConfig['discovery.type'] = 'zen2'
esConfig['cluster.initial_master_nodes'] = nodes.stream().map({ n ->
if (n.config.settings['node.name'] == null) {
return "node-" + n.nodeNum
} else {
return n.config.settings['node.name']
}
}).collect(Collectors.toList())
}
esConfig
}
dependsOn = startDependencies

@@ -19,14 +19,10 @@
package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.DefaultTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.execution.TaskExecutionAdapter
import org.gradle.api.provider.Property
import org.gradle.api.provider.Provider
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskState

@@ -36,7 +32,6 @@ import org.gradle.plugins.ide.idea.IdeaPlugin
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.util.stream.Stream

/**
* A wrapper task around setting up a cluster and running rest tests.
*/

@@ -57,38 +57,6 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestIndicesAction.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]geogrid[/\\]GeoHashGridAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]histogram[/\\]HistogramAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]missing[/\\]MissingAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]GND.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]NXYSignificanceHeuristic.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]PercentageScore.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]ScriptHeuristic.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]SignificanceHeuristic.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTermsAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTermsAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregator.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregatorFactory.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]AggregatedDfs.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]DfsSearchResult.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]ShardSearchTransportRequest.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]FieldLookup.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafDocLookup.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]QueryRescorer.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]ContextMapping.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoContextMapping.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoQueryContext.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardFailure.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />

@@ -113,45 +81,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasRoutingIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]SimpleRoutingIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptServiceTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueModeTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchWithRejectionsIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]MissingValueIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]DateHistogramIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]DateHistogramOffsetIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]GeoDistanceIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]MissingIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]NaNSortingIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]NestedIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]ReverseNestedIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]SamplerIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]ShardReduceIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]ShardSizeTestCase.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]SearchWhileCreatingIndexIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]SearchWhileRelocatingIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]SearchWithRandomExceptionsIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]SearchWithRandomIOExceptionsIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]TransportTwoNodesSearchIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoBoundingBoxIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoFilterIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoShapeQueryTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]morelikethis[/\\]MoreLikeThisIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]nested[/\\]SimpleNestedIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]preference[/\\]SearchPreferenceIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]ExistsIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]MultiMatchQueryIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]SearchQueryIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]scroll[/\\]SearchScrollIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]scroll[/\\]SearchScrollWithFailingNodesIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]searchafter[/\\]SearchAfterIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]simple[/\\]SimpleSearchIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]GeoDistanceSortBuilderIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]CompletionSuggestSearchIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]ContextCompletionSuggestSearchIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CategoryContextMappingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]GeoContextMappingTests.java" checks="LineLength" />

<!-- Temporarily contains extra-long lines as examples for tests to be written, see https://github.com/elastic/elasticsearch/issues/34829 -->
<suppress files="modules[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ContextExampleTests.java" checks="LineLength" />

@@ -1,5 +1,5 @@
elasticsearch = 7.0.0
lucene = 8.0.0-snapshot-c78429a554
lucene = 8.0.0-snapshot-aaa64d70159

# optional dependencies
spatial4j = 0.7

@@ -615,8 +615,18 @@ final class RequestConverters {
}

static Request termVectors(TermVectorsRequest tvrequest) throws IOException {
String endpoint = new EndpointBuilder().addPathPart(
tvrequest.getIndex(), tvrequest.getType(), tvrequest.getId()).addPathPartAsIs("_termvectors").build();
String endpoint;
if (tvrequest.getType() != null) {
endpoint = new EndpointBuilder().addPathPart(tvrequest.getIndex(), tvrequest.getType(), tvrequest.getId())
.addPathPartAsIs("_termvectors")
.build();
} else {
endpoint = new EndpointBuilder().addPathPart(tvrequest.getIndex())
.addPathPartAsIs("_termvectors")
.addPathPart(tvrequest.getId())
.build();
}

Request request = new Request(HttpGet.METHOD_NAME, endpoint);
Params params = new Params(request);
params.withRouting(tvrequest.getRouting());

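A rough sketch of the two endpoint shapes produced by the converter above (an illustration, not part of the change; the index, type, and id values are made up, and the resulting paths match the request-converter tests later in this diff):

// Typed requests keep the legacy endpoint shape.
TermVectorsRequest typed = new TermVectorsRequest("twitter", "_doc", "1");
// -> GET /twitter/_doc/1/_termvectors

// Typeless requests use the new _termvectors/{id} form.
TermVectorsRequest typeless = new TermVectorsRequest("twitter", "1");
// -> GET /twitter/_termvectors/1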
@@ -20,6 +20,7 @@
package org.elasticsearch.client.core;

import org.elasticsearch.client.Validatable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -31,7 +32,7 @@ import java.util.Map;
public class TermVectorsRequest implements ToXContentObject, Validatable {

private final String index;
private final String type;
@Nullable private final String type;
private String id = null;
private XContentBuilder docBuilder = null;

@@ -47,25 +48,57 @@ public class TermVectorsRequest implements ToXContentObject, Validatable {
private Map<String, String> perFieldAnalyzer = null;
private Map<String, Integer> filterSettings = null;

/**
* Constructs TermVectorRequest for the given document
*
* @param index - index of the document
* @param docId - id of the document
*/
public TermVectorsRequest(String index, String docId) {
this.index = index;
this.type = null;
this.id = docId;
}

/**
* Constructs TermVectorRequest for the given document
*
* @param index - index of the document
* @param type - type of the document
* @param docId - id of the document
*
* @deprecated Types are in the process of being removed, use
* {@link #TermVectorsRequest(String, String)} instead.
*/
@Deprecated
public TermVectorsRequest(String index, String type, String docId) {
this.index = index;
this.type = type;
this.id = docId;
}

/**
* Constructs TermVectorRequest for an artificial document
*
* @param index - index of the document
* @param docBuilder - an artificial document
*/
public TermVectorsRequest(String index, XContentBuilder docBuilder) {
this.index = index;
this.type = null;
this.docBuilder = docBuilder;
}

/**
* Constructs TermVectorRequest for an artificial document
* @param index - index of the document
* @param type - type of the document
* @param docBuilder - an artificial document
*
* @deprecated Types are in the process of being removed, use
* {@link TermVectorsRequest(String, XContentBuilder)} instead.
*/
@Deprecated
public TermVectorsRequest(String index, String type, XContentBuilder docBuilder) {
this.index = index;
this.type = type;

@@ -104,7 +137,10 @@ public class TermVectorsRequest implements ToXContentObject, Validatable {

/**
* Returns the type of the request
*
* @deprecated Types are in the process of being removed.
*/
@Deprecated
public String getType() {
return type;
}

@@ -218,7 +254,9 @@ public class TermVectorsRequest implements ToXContentObject, Validatable {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("_index", index);
builder.field("_type", type);
if (type != null) {
builder.field("_type", type);
}
if (id != null) builder.field("_id", id);
// set values only when different from defaults
if (requestPositions == false) builder.field("positions", false);

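A minimal sketch of how the new typeless constructors are meant to be used (index and field names are illustrative; the same pattern appears in the documentation tests further down in this diff):

// Term vectors for an existing document, no type needed.
TermVectorsRequest byId = new TermVectorsRequest("authors", "1");
byId.setFields("user");

// Term vectors for an artificial document supplied in the request body.
XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject().field("user", "guest-user").endObject();
TermVectorsRequest artificial = new TermVectorsRequest("authors", docBuilder);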
@@ -94,7 +94,10 @@ public class TermVectorsResponse {

/**
* Returns the type for the response
*
* @deprecated Types are in the process of being removed.
*/
@Deprecated
public String getType() {
return type;
}

@@ -83,6 +83,7 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
});
}

@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/36339")
public void testIndexFollowing() throws Exception {
CcrClient ccrClient = highLevelClient().ccr();

@@ -1137,7 +1137,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
{
// test _termvectors on real documents
TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "_doc", "1");
TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "1");
tvRequest.setFields("field");
TermVectorsResponse tvResponse = execute(tvRequest, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync);

@@ -1160,7 +1160,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject().field("field", "valuex").endObject();

TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "_doc", docBuilder);
TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, docBuilder);
TermVectorsResponse tvResponse = execute(tvRequest, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync);

TermVectorsResponse.TermVector.Token expectedToken = new TermVectorsResponse.TermVector.Token(0, 6, 0, null);

@@ -1180,7 +1180,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {

// Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here
public void testTermvectorsWithNonExistentIndex() {
TermVectorsRequest request = new TermVectorsRequest("non-existent", "non-existent", "non-existent");
TermVectorsRequest request = new TermVectorsRequest("non-existent", "non-existent");

ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> execute(request, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync));

@@ -1214,7 +1214,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
{
// test _mtermvectors where MultiTermVectorsRequest is constructed with ids and a template
String[] expectedIds = {"1", "2"};
TermVectorsRequest tvRequestTemplate = new TermVectorsRequest(sourceIndex, "_doc", "fake_id");
TermVectorsRequest tvRequestTemplate = new TermVectorsRequest(sourceIndex, "fake_id");
tvRequestTemplate.setFields("field");
MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest(expectedIds, tvRequestTemplate);

@@ -1233,13 +1233,13 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
{
// test _mtermvectors where MultiTermVectorsRequest constructed with adding each separate request
MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest();
TermVectorsRequest tvRequest1 = new TermVectorsRequest(sourceIndex, "_doc", "1");
TermVectorsRequest tvRequest1 = new TermVectorsRequest(sourceIndex, "1");
tvRequest1.setFields("field");
mtvRequest.add(tvRequest1);

XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject().field("field", "valuex").endObject();
TermVectorsRequest tvRequest2 = new TermVectorsRequest(sourceIndex, "_doc", docBuilder);
TermVectorsRequest tvRequest2 = new TermVectorsRequest(sourceIndex, docBuilder);
mtvRequest.add(tvRequest2);

MultiTermVectorsResponse mtvResponse =

@@ -53,11 +53,11 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.client.core.MultiTermVectorsRequest;
import org.elasticsearch.client.core.TermVectorsRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.MultiTermVectorsRequest;
import org.elasticsearch.client.core.TermVectorsRequest;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;

@@ -1266,9 +1266,9 @@ public class RequestConvertersTests extends ESTestCase {

public void testTermVectors() throws IOException {
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
TermVectorsRequest tvRequest = new TermVectorsRequest(index, type, id);

TermVectorsRequest tvRequest = new TermVectorsRequest(index, id);
Map<String, String> expectedParams = new HashMap<>();
String[] fields;
if (randomBoolean()) {

@@ -1289,7 +1289,7 @@ public class RequestConvertersTests extends ESTestCase {

Request request = RequestConverters.termVectors(tvRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
endpoint.add(index).add(type).add(id).add("_termvectors");
endpoint.add(index).add("_termvectors").add(id);

assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals(endpoint.toString(), request.getEndpoint());

@@ -1304,13 +1304,27 @@ public class RequestConvertersTests extends ESTestCase {
assertToXContentBody(tvRequest, request.getEntity());
}

public void testTermVectorsWithType() throws IOException {
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
TermVectorsRequest tvRequest = new TermVectorsRequest(index, type, id);

Request request = RequestConverters.termVectors(tvRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
endpoint.add(index).add(type).add(id).add("_termvectors");

assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals(endpoint.toString(), request.getEndpoint());
}

public void testMultiTermVectors() throws IOException {
MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest();

int numberOfRequests = randomIntBetween(0, 5);
for (int i = 0; i < numberOfRequests; i++) {
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String type = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
TermVectorsRequest tvRequest = new TermVectorsRequest(index, type, id);
String[] fields = generateRandomStringArray(10, 5, false, false);

@@ -1558,18 +1558,16 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {

{
// tag::term-vectors-request
TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1");
TermVectorsRequest request = new TermVectorsRequest("authors", "1");
request.setFields("user");
// end::term-vectors-request
}

{
// tag::term-vectors-request-artificial

XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject().field("user", "guest-user").endObject();
TermVectorsRequest request = new TermVectorsRequest("authors",
"_doc",
docBuilder); // <1>
// end::term-vectors-request-artificial

@@ -1600,7 +1598,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
// end::term-vectors-request-optional-arguments
}

TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1");
TermVectorsRequest request = new TermVectorsRequest("authors", "1");
request.setFields("user");

// tag::term-vectors-execute

@@ -1687,21 +1685,21 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::multi-term-vectors-request
MultiTermVectorsRequest request = new MultiTermVectorsRequest(); // <1>
TermVectorsRequest tvrequest1 =
new TermVectorsRequest("authors", "_doc", "1");
new TermVectorsRequest("authors", "1");
tvrequest1.setFields("user");
request.add(tvrequest1); // <2>

XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject().field("user", "guest-user").endObject();
TermVectorsRequest tvrequest2 =
new TermVectorsRequest("authors", "_doc", docBuilder);
new TermVectorsRequest("authors", docBuilder);
request.add(tvrequest2); // <3>
// end::multi-term-vectors-request
}

// tag::multi-term-vectors-request-template
TermVectorsRequest tvrequestTemplate =
new TermVectorsRequest("authors", "_doc", "fake_id"); // <1>
new TermVectorsRequest("authors", "fake_id"); // <1>
tvrequestTemplate.setFields("user");
String[] ids = {"1", "2"};
MultiTermVectorsRequest request =

@@ -0,0 +1,106 @@
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.VersionProperties

apply plugin: 'base'

configurations {
dockerPlugins
dockerSource
ossDockerSource
}

dependencies {
dockerPlugins project(path: ":plugins:ingest-geoip", configuration: 'zip')
dockerPlugins project(path: ":plugins:ingest-user-agent", configuration: 'zip')
dockerSource project(path: ":distribution:archives:tar")
ossDockerSource project(path: ":distribution:archives:oss-tar")
}

ext.expansions = { oss ->
return [
'elasticsearch' : oss ? "elasticsearch-oss-${VersionProperties.elasticsearch}.tar.gz" : "elasticsearch-${VersionProperties.elasticsearch}.tar.gz",
'jdkUrl' : 'https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_linux-x64_bin.tar.gz',
'jdkVersion' : '11.0.1',
'license': oss ? 'Apache-2.0' : 'Elastic License',
'ingest-geoip' : "ingest-geoip-${VersionProperties.elasticsearch}.zip",
'ingest-user-agent' : "ingest-user-agent-${VersionProperties.elasticsearch}.zip",
'version' : VersionProperties.elasticsearch
]
}

private static String files(final boolean oss) {
return "build/${ oss ? 'oss-' : ''}docker"
}

private static String taskName(final String prefix, final boolean oss, final String suffix) {
return "${prefix}${oss ? 'Oss' : ''}${suffix}"
}

void addCopyDockerContextTask(final boolean oss) {
task(taskName("copy", oss, "DockerContext"), type: Sync) {
into files(oss)

into('bin') {
from 'src/docker/bin'
}

into('config') {
from 'src/docker/config'
}

if (oss) {
from configurations.ossDockerSource
} else {
from configurations.dockerSource
}

from configurations.dockerPlugins
}
}

void addCopyDockerfileTask(final boolean oss) {
task(taskName("copy", oss, "Dockerfile"), type: Copy) {
mustRunAfter(taskName("copy", oss, "DockerContext"))
into files(oss)

from('src/docker/Dockerfile') {
MavenFilteringHack.filter(it, expansions(oss))
}
}
}

void addBuildDockerImage(final boolean oss) {
final Task buildDockerImageTask = task(taskName("build", oss, "DockerImage"), type: LoggedExec) {
dependsOn taskName("copy", oss, "DockerContext")
dependsOn taskName("copy", oss, "Dockerfile")
List<String> tags
if (oss) {
tags = [ "docker.elastic.co/elasticsearch/elasticsearch-oss:${VersionProperties.elasticsearch}" ]
} else {
tags = [
"elasticsearch:${VersionProperties.elasticsearch}",
"docker.elastic.co/elasticsearch/elasticsearch:${VersionProperties.elasticsearch}",
"docker.elastic.co/elasticsearch/elasticsearch-full:${VersionProperties.elasticsearch}"
]
}
executable 'docker'
final List<String> dockerArgs = ['build', files(oss), '--pull']
for (final String tag : tags) {
dockerArgs.add('--tag')
dockerArgs.add(tag)
}
args dockerArgs.toArray()
}
BuildPlugin.requireDocker(buildDockerImageTask)
}

for (final boolean oss : [false, true]) {
addCopyDockerContextTask(oss)
addCopyDockerfileTask(oss)
addBuildDockerImage(oss)
}

assemble.dependsOn "buildOssDockerImage"
assemble.dependsOn "buildDockerImage"

@@ -0,0 +1,92 @@
################################################################################
# This Dockerfile was generated from the template at distribution/src/docker/Dockerfile
#
# Beginning of multi stage Dockerfile
################################################################################

################################################################################
# Build stage 0 `builder`:
# Extract elasticsearch artifact
# Install required plugins
# Set gid=0 and make group perms==owner perms
################################################################################

FROM centos:7 AS builder

ENV PATH /usr/share/elasticsearch/bin:$PATH
ENV JAVA_HOME /opt/jdk-${jdkVersion}

RUN curl -s ${jdkUrl} | tar -C /opt -zxf -

# Replace OpenJDK's built-in CA certificate keystore with the one from the OS
# vendor. The latter is superior in several ways.
# REF: https://github.com/elastic/elasticsearch-docker/issues/171
RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /opt/jdk-${jdkVersion}/lib/security/cacerts

RUN yum install -y unzip which

RUN groupadd -g 1000 elasticsearch && \
    adduser -u 1000 -g 1000 -d /usr/share/elasticsearch elasticsearch

WORKDIR /usr/share/elasticsearch

COPY ${elasticsearch} ${ingest-geoip} ${ingest-user-agent} /opt/
RUN tar zxf /opt/${elasticsearch} --strip-components=1
RUN elasticsearch-plugin install --batch file:///opt/${ingest-geoip}
RUN elasticsearch-plugin install --batch file:///opt/${ingest-user-agent}
RUN mkdir -p config data logs
RUN chmod 0775 config data logs
COPY config/elasticsearch.yml config/log4j2.properties config/

################################################################################
# Build stage 1 (the actual elasticsearch image):
# Copy elasticsearch from stage 0
# Add entrypoint
################################################################################

FROM centos:7

ENV ELASTIC_CONTAINER true
ENV JAVA_HOME /opt/jdk-${jdkVersion}

COPY --from=builder /opt/jdk-${jdkVersion} /opt/jdk-${jdkVersion}

RUN yum update -y && \
    yum install -y nc unzip wget which && \
    yum clean all

RUN groupadd -g 1000 elasticsearch && \
    adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \
    chmod 0775 /usr/share/elasticsearch && \
    chgrp 0 /usr/share/elasticsearch

WORKDIR /usr/share/elasticsearch
COPY --from=builder --chown=1000:0 /usr/share/elasticsearch /usr/share/elasticsearch
ENV PATH /usr/share/elasticsearch/bin:$PATH

COPY --chown=1000:0 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh

# Openshift overrides USER and uses ones with randomly uid>1024 and gid=0
# Allow ENTRYPOINT (and ES) to run even with a different user
RUN chgrp 0 /usr/local/bin/docker-entrypoint.sh && \
    chmod g=u /etc/passwd && \
    chmod 0775 /usr/local/bin/docker-entrypoint.sh

EXPOSE 9200 9300

LABEL org.label-schema.schema-version="1.0" \
  org.label-schema.vendor="Elastic" \
  org.label-schema.name="elasticsearch" \
  org.label-schema.version="${version}" \
  org.label-schema.url="https://www.elastic.co/products/elasticsearch" \
  org.label-schema.vcs-url="https://github.com/elastic/elasticsearch" \
  license="${license}"

ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
# Dummy overridable parameter parsed by entrypoint
CMD ["eswrapper"]

################################################################################
# End of multi-stage Dockerfile
################################################################################

@@ -0,0 +1,100 @@
#!/bin/bash
set -e

# Files created by Elasticsearch should always be group writable too
umask 0002

run_as_other_user_if_needed() {
if [[ "$(id -u)" == "0" ]]; then
# If running as root, drop to specified UID and run command
exec chroot --userspec=1000 / "${@}"
else
# Either we are running in Openshift with random uid and are a member of the root group
# or with a custom --user
exec "${@}"
fi
}

# Allow user specify custom CMD, maybe bin/elasticsearch itself
# for example to directly specify `-E` style parameters for elasticsearch on k8s
# or simply to run /bin/bash to check the image
if [[ "$1" != "eswrapper" ]]; then
if [[ "$(id -u)" == "0" && $(basename "$1") == "elasticsearch" ]]; then
# centos:7 chroot doesn't have the `--skip-chdir` option and
# changes our CWD.
# Rewrite CMD args to replace $1 with `elasticsearch` explicitly,
# so that we are backwards compatible with the docs
# from the previous Elasticsearch versions<6
# and configuration option D:
# https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink
# Without this, user could specify `elasticsearch -E x.y=z` but
# `bin/elasticsearch -E x.y=z` would not work.
set -- "elasticsearch" "${@:2}"
# Use chroot to switch to UID 1000
exec chroot --userspec=1000 / "$@"
else
# User probably wants to run something else, like /bin/bash, with another uid forced (Openshift?)
exec "$@"
fi
fi

# Parse Docker env vars to customize Elasticsearch
#
# e.g. Setting the env var cluster.name=testcluster
#
# will cause Elasticsearch to be invoked with -Ecluster.name=testcluster
#
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html#_setting_default_settings

declare -a es_opts

while IFS='=' read -r envvar_key envvar_value
do
# Elasticsearch settings need to have at least two dot separated lowercase
# words, e.g. `cluster.name`, except for `processors` which we handle
# specially
if [[ "$envvar_key" =~ ^[a-z0-9_]+\.[a-z0-9_]+ || "$envvar_key" == "processors" ]]; then
if [[ ! -z $envvar_value ]]; then
es_opt="-E${envvar_key}=${envvar_value}"
es_opts+=("${es_opt}")
fi
fi
done < <(env)

# The virtual file /proc/self/cgroup should list the current cgroup
# membership. For each hierarchy, you can follow the cgroup path from
# this file to the cgroup filesystem (usually /sys/fs/cgroup/) and
# introspect the statistics for the cgroup for the given
# hierarchy. Alas, Docker breaks this by mounting the container
# statistics at the root while leaving the cgroup paths as the actual
# paths. Therefore, Elasticsearch provides a mechanism to override
# reading the cgroup path from /proc/self/cgroup and instead uses the
# cgroup path defined the JVM system property
# es.cgroups.hierarchy.override. Therefore, we set this value here so
# that cgroup statistics are available for the container this process
# will run in.
export ES_JAVA_OPTS="-Des.cgroups.hierarchy.override=/ $ES_JAVA_OPTS"

if [[ -d bin/x-pack ]]; then
# Check for the ELASTIC_PASSWORD environment variable to set the
# bootstrap password for Security.
#
# This is only required for the first node in a cluster with Security
# enabled, but we have no way of knowing which node we are yet. We'll just
# honor the variable if it's present.
if [[ -n "$ELASTIC_PASSWORD" ]]; then
[[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (run_as_other_user_if_needed elasticsearch-keystore create)
if ! (run_as_other_user_if_needed elasticsearch-keystore list | grep -q '^bootstrap.password$'); then
(run_as_other_user_if_needed echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password')
fi
fi
fi

if [[ "$(id -u)" == "0" ]]; then
# If requested and running as root, mutate the ownership of bind-mounts
if [[ -n "$TAKE_FILE_OWNERSHIP" ]]; then
chown -R 1000:0 /usr/share/elasticsearch/{data,logs}
fi
fi

run_as_other_user_if_needed /usr/share/elasticsearch/bin/elasticsearch "${es_opts[@]}"

@@ -0,0 +1,2 @@
cluster.name: "docker-cluster"
network.host: 0.0.0.0

@@ -0,0 +1,9 @@
status = error

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@@ -27,7 +27,7 @@ include-tagged::{doc-tests-file}[{api}-request]

The second way can be used when all term vectors requests share the same
arguments, such as index, type, and other settings. In this case, a template
arguments, such as index and other settings. In this case, a template
+{tvrequest}+ can be created with all necessary settings set, and
this template request can be passed to +{request}+ along with all
documents' ids for which to execute these requests.

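For reference, a short sketch of the template form described above, based on the high-level client usage elsewhere in this change (the index, field, and id values are illustrative):

// Build one template request carrying the shared settings, then fan it out over ids.
TermVectorsRequest template = new TermVectorsRequest("authors", "fake_id");
template.setFields("user");
String[] ids = {"1", "2"};
MultiTermVectorsRequest request = new MultiTermVectorsRequest(ids, template);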
@@ -42,4 +42,26 @@ The number of soft deletes to retain. Soft deletes are collected during merges
on the underlying Lucene index yet retained up to the number of operations
configured by this setting. The default value is `0`.

For more information about index settings, see {ref}/index-modules.html[Index modules].
For more information about index settings, see {ref}/index-modules.html[Index modules].

[float]
[[ccr-overview-beats]]
==== Setting soft deletes on indices created by APM Server or Beats

If you want to replicate indices created by APM Server or Beats, and are
allowing APM Server or Beats to manage index templates, you need to enable
soft deletes on the underlying index templates. To enable soft deletes on the
underlying index templates, add the following changes to the relevant APM Server
or Beats configuration file.

["source","yaml"]
----------------------------------------------------------------------
setup.template.overwrite: true
setup.template.settings:
  index.soft_deletes.enabled: true
  index.soft_deletes.retention.operations: 1024
----------------------------------------------------------------------

For additional information on controlling the index templates managed by APM
Server or Beats, see the relevant documentation on loading the Elasticsearch
index template.

@@ -2,8 +2,8 @@
== Multi termvectors API

Multi termvectors API allows to get multiple termvectors at once. The
documents from which to retrieve the term vectors are specified by an index,
type and id. But the documents could also be artificially provided in the request itself.
documents from which to retrieve the term vectors are specified by an index and id.
But the documents could also be artificially provided in the request itself.

The response includes a `docs`
array with all the fetched termvectors, each element having the structure

@@ -17,13 +17,11 @@ POST /_mtermvectors
"docs": [
{
"_index": "twitter",
"_type": "_doc",
"_id": "2",
"term_statistics": true
},
{
"_index": "twitter",
"_type": "_doc",
"_id": "1",
"fields": [
"message"

@@ -43,31 +41,6 @@ is not required in the body):
[source,js]
--------------------------------------------------
POST /twitter/_mtermvectors
{
"docs": [
{
"_type": "_doc",
"_id": "2",
"fields": [
"message"
],
"term_statistics": true
},
{
"_type": "_doc",
"_id": "1"
}
]
}
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

And type:

[source,js]
--------------------------------------------------
POST /twitter/_doc/_mtermvectors
{
"docs": [
{

@@ -86,11 +59,11 @@ POST /twitter/_doc/_mtermvectors
// CONSOLE
// TEST[setup:twitter]

If all requested documents are on same index and have same type and also the parameters are the same, the request can be simplified:
If all requested documents are on same index and also the parameters are the same, the request can be simplified:

[source,js]
--------------------------------------------------
POST /twitter/_doc/_mtermvectors
POST /twitter/_mtermvectors
{
"ids" : ["1", "2"],
"parameters": {

@@ -105,8 +78,8 @@ POST /twitter/_doc/_mtermvectors
// TEST[setup:twitter]

Additionally, just like for the <<docs-termvectors,termvectors>>
API, term vectors could be generated for user provided documents. The mapping used is
determined by `_index` and `_type`.
API, term vectors could be generated for user provided documents.
The mapping used is determined by `_index`.

[source,js]
--------------------------------------------------

@@ -115,7 +88,6 @@ POST /_mtermvectors
"docs": [
{
"_index": "twitter",
"_type": "_doc",
"doc" : {
"user" : "John Doe",
"message" : "twitter test test test"

@@ -123,7 +95,6 @@ POST /_mtermvectors
},
{
"_index": "twitter",
"_type": "_doc",
"doc" : {
"user" : "Jane Doe",
"message" : "Another twitter test ..."

@@ -8,7 +8,7 @@ realtime. This can be changed by setting `realtime` parameter to `false`.

[source,js]
--------------------------------------------------
GET /twitter/_doc/1/_termvectors
GET /twitter/_termvectors/1
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

@@ -18,7 +18,7 @@ retrieved either with a parameter in the url

[source,js]
--------------------------------------------------
GET /twitter/_doc/1/_termvectors?fields=message
GET /twitter/_termvectors/1?fields=message
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

@@ -189,7 +189,7 @@ The following request returns all information and statistics for field

[source,js]
--------------------------------------------------
GET /twitter/_doc/1/_termvectors
GET /twitter/_termvectors/1
{
"fields" : ["text"],
"offsets" : true,

@@ -277,7 +277,7 @@ Note that for the field `text`, the terms are not re-generated.

[source,js]
--------------------------------------------------
GET /twitter/_doc/1/_termvectors
GET /twitter/_termvectors/1
{
"fields" : ["text", "some_field_without_term_vectors"],
"offsets" : true,

@@ -295,15 +295,14 @@ GET /twitter/_doc/1/_termvectors

Term vectors can also be generated for artificial documents,
that is for documents not present in the index. For example, the following request would
return the same results as in example 1. The mapping used is determined by the
`index` and `type`.
return the same results as in example 1. The mapping used is determined by the `index`.

*If dynamic mapping is turned on (default), the document fields not in the original
mapping will be dynamically created.*

[source,js]
--------------------------------------------------
GET /twitter/_doc/_termvectors
GET /twitter/_termvectors
{
"doc" : {
"fullname" : "John Doe",

@@ -326,7 +325,7 @@ vectors, the term vectors will be re-generated.

[source,js]
--------------------------------------------------
GET /twitter/_doc/_termvectors
GET /twitter/_termvectors
{
"doc" : {
"fullname" : "John Doe",

@@ -393,7 +392,7 @@ their tf-idf must be too low.

[source,js]
--------------------------------------------------
GET /imdb/_doc/_termvectors
GET /imdb/_termvectors
{
"doc": {
"plot": "When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil."

@ -16,6 +16,7 @@ Here are a few sample use-cases that Elasticsearch could be used for:
|
|||
For the rest of this tutorial, you will be guided through the process of getting Elasticsearch up and running, taking a peek inside it, and performing basic operations like indexing, searching, and modifying your data. At the end of this tutorial, you should have a good idea of what Elasticsearch is, how it works, and hopefully be inspired to see how you can use it to either build sophisticated search applications or to mine intelligence from your data.
|
||||
--
|
||||
|
||||
[[getting-started-concepts]]
|
||||
== Basic Concepts
|
||||
|
||||
There are a few concepts that are core to Elasticsearch. Understanding these concepts from the outset will tremendously help ease the learning process.
|
||||
|
@ -103,6 +104,7 @@ You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API.
|
|||
|
||||
With that out of the way, let's get started with the fun part...
|
||||
|
||||
[[getting-started-install]]
|
||||
== Installation
|
||||
|
||||
[TIP]
|
||||
|
@ -266,6 +268,7 @@ As mentioned previously, we can override either the cluster or node name. This c
|
|||
|
||||
Also note the line marked http with information about the HTTP address (`192.168.8.112`) and port (`9200`) that our node is reachable from. By default, Elasticsearch uses port `9200` to provide access to its REST API. This port is configurable if necessary.
|
||||
|
||||
[[getting-started-explore]]
|
||||
== Exploring Your Cluster
|
||||
|
||||
[float]
|
||||
|
@ -278,6 +281,7 @@ Now that we have our node (and cluster) up and running, the next step is to unde
|
|||
* Perform CRUD (Create, Read, Update, and Delete) and search operations against your indexes
|
||||
* Execute advanced search operations such as paging, sorting, filtering, scripting, aggregations, and many others
|
||||
|
||||
[[getting-started-cluster-health]]
|
||||
=== Cluster Health
|
||||
|
||||
Let's start with a basic health check, which we can use to see how our cluster is doing. We'll be using curl to do this but you can use any tool that allows you to make HTTP/REST calls. Let's assume that we are still on the same node where we started Elasticsearch on and open another command shell window.
|
||||
|
@ -336,6 +340,7 @@ ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master
|
|||
|
||||
Here, we can see our one node named "PB2SGZY", which is the single node that is currently in our cluster.
|
||||
|
||||
[[getting-started-list-indices]]
|
||||
=== List All Indices
|
||||
|
||||
Now let's take a peek at our indices:
|
||||
|
@ -356,6 +361,7 @@ health status index uuid pri rep docs.count docs.deleted store.size pri.store.si
|
|||
|
||||
Which simply means we have no indices yet in the cluster.
|
||||
|
||||
[[getting-started-create-index]]
|
||||
=== Create an Index
|
||||
|
||||
Now let's create an index named "customer" and then list all the indexes again:
|
||||
|
@ -382,6 +388,7 @@ The results of the second command tells us that we now have one index named cust
|
|||
|
||||
You might also notice that the customer index has a yellow health tagged to it. Recall from our previous discussion that yellow means that some replicas are not (yet) allocated. The reason this happens for this index is because Elasticsearch by default created one replica for this index. Since we only have one node running at the moment, that one replica cannot yet be allocated (for high availability) until a later point in time when another node joins the cluster. Once that replica gets allocated onto a second node, the health status for this index will turn to green.
|
||||
|
||||
[[getting-started-query-document]]
|
||||
=== Index and Query a Document
|
||||
|
||||
Let's now put something into our customer index. We'll index a simple customer document into the customer index, with an ID of 1 as follows:
|
||||
|
@ -446,6 +453,7 @@ And the response:
|
|||
|
||||
Nothing out of the ordinary here other than a field, `found`, stating that we found a document with the requested ID 1 and another field, `_source`, which returns the full JSON document that we indexed from the previous step.
|
||||
|
||||
[[getting-started-delete-index]]
|
||||
=== Delete an Index
|
||||
|
||||
Now let's delete the index that we just created and then list all the indexes again:
|
||||
|
@ -492,6 +500,7 @@ If we study the above commands carefully, we can actually see a pattern of how w
|
|||
|
||||
This REST access pattern is so pervasive throughout all the API commands that if you can simply remember it, you will have a good head start at mastering Elasticsearch.
|
||||
|
||||
[[getting-started-modify-data]]
|
||||
== Modifying Your Data
|
||||
|
||||
Elasticsearch provides data manipulation and search capabilities in near real time. By default, you can expect a one second delay (refresh interval) from the time you index/update/delete your data until the time that it appears in your search results. This is an important distinction from other platforms like SQL wherein data is immediately available after a transaction is completed.
|
||||
|
@ -552,6 +561,7 @@ POST /customer/_doc?pretty
|
|||
|
||||
Note that in the above case, we are using the `POST` verb instead of PUT since we didn't specify an ID.
|
||||
|
||||
[[getting-started-update-documents]]
|
||||
=== Updating Documents
|
||||
|
||||
In addition to being able to index and replace documents, we can also update documents. Note though that Elasticsearch does not actually do in-place updates under the hood. Whenever we do an update, Elasticsearch deletes the old document and then indexes a new document with the update applied to it in one shot.
|
||||
|
@ -596,6 +606,7 @@ In the above example, `ctx._source` refers to the current source document that i
|
|||
|
||||
Elasticsearch provides the ability to update multiple documents given a query condition (like an `SQL UPDATE-WHERE` statement). See {ref}/docs-update-by-query.html[`docs-update-by-query` API]
|
||||
|
||||
[[getting-started-delete-documents]]
|
||||
=== Deleting Documents
|
||||
|
||||
Deleting a document is fairly straightforward. This example shows how to delete our previous customer with the ID of 2:
|
||||
|
@ -611,6 +622,7 @@ See the {ref}/docs-delete-by-query.html[`_delete_by_query` API] to delete all do
|
|||
It is worth noting that it is much more efficient to delete a whole index
|
||||
instead of deleting all documents with the Delete By Query API.
|
||||
|
||||
[[getting-started-batch-processing]]
|
||||
=== Batch Processing
|
||||
|
||||
In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the {ref}/docs-bulk.html[`_bulk` API]. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as few network roundtrips as possible.
|
||||
|
@ -643,6 +655,7 @@ Note above that for the delete action, there is no corresponding source document
|
|||
|
||||
The Bulk API does not fail due to failures in one of the actions. If a single action fails for whatever reason, it will continue to process the remainder of the actions after it. When the bulk API returns, it will provide a status for each action (in the same order it was sent in) so that you can check if a specific action failed or not.
|
||||
|
||||
[[getting-started-explore-data]]
|
||||
== Exploring Your Data
|
||||
|
||||
[float]
|
||||
|
@ -706,6 +719,7 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12
|
|||
|
||||
Which means that we just successfully bulk indexed 1000 documents into the bank index (under the `_doc` type).
|
||||
|
||||
[[getting-started-search-API]]
|
||||
=== The Search API
|
||||
|
||||
Now let's start with some simple searches. There are two basic ways to run searches: one is by sending search parameters through the {ref}/search-uri-request.html[REST request URI] and the other by sending them through the {ref}/search-request-body.html[REST request body]. The request body method allows you to be more expressive and also to define your searches in a more readable JSON format. We'll try one example of the request URI method but for the remainder of this tutorial, we will exclusively be using the request body method.
|
||||
|
@ -843,6 +857,7 @@ to clutter the docs with it:
|
|||
|
||||
It is important to understand that once you get your search results back, Elasticsearch is completely done with the request and does not maintain any kind of server-side resources or open cursors into your results. This is in stark contrast to many other platforms such as SQL wherein you may initially get a partial subset of your query results up-front and then you have to continuously go back to the server if you want to fetch (or page through) the rest of the results using some kind of stateful server-side cursor.
|
||||
|
||||
[[getting-started-query-lang]]
|
||||
=== Introducing the Query Language
|
||||
|
||||
Elasticsearch provides a JSON-style domain-specific language that you can use to execute queries. This is referred to as the {ref}/query-dsl.html[Query DSL]. The query language is quite comprehensive and can be intimidating at first glance but the best way to actually learn it is to start with a few basic examples.
|
||||
|
@ -907,6 +922,7 @@ GET /bank/_search
|
|||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[getting-started-search]]
|
||||
=== Executing Searches
|
||||
|
||||
Now that we have seen a few of the basic search parameters, let's dig in some more into the Query DSL. Let's first take a look at the returned document fields. By default, the full JSON document is returned as part of all searches. This is referred to as the source (`_source` field in the search hits). If we don't want the entire source document returned, we have the ability to request only a few fields from within source to be returned.
|
||||
|
@ -1066,6 +1082,7 @@ GET /bank/_search
|
|||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[getting-started-filters]]
|
||||
=== Executing Filters
|
||||
|
||||
In the previous section, we skipped over a little detail called the document score (`_score` field in the search results). The score is a numeric value that is a relative measure of how well the document matches the search query that we specified. The higher the score, the more relevant the document is, the lower the score, the less relevant the document is.
|
||||
|
@ -1102,6 +1119,7 @@ Dissecting the above, the bool query contains a `match_all` query (the query par
|
|||
|
||||
In addition to the `match_all`, `match`, `bool`, and `range` queries, there are a lot of other query types that are available and we won't go into them here. Since we already have a basic understanding of how they work, it shouldn't be too difficult to apply this knowledge in learning and experimenting with the other query types.
|
||||
|
||||
[[getting-started-aggregations]]
|
||||
=== Executing Aggregations
|
||||
|
||||
Aggregations provide the ability to group and extract statistics from your data. The easiest way to think about aggregations is by roughly equating it to the SQL GROUP BY and the SQL aggregate functions. In Elasticsearch, you have the ability to execute searches returning hits and at the same time return aggregated results separate from the hits all in one response. This is very powerful and efficient in the sense that you can run queries and multiple aggregations and get the results back of both (or either) operations in one shot avoiding network roundtrips using a concise and simplified API.
|
||||
|
@ -1305,6 +1323,7 @@ GET /bank/_search
|
|||
|
||||
There are many other aggregations capabilities that we won't go into detail here. The {ref}/search-aggregations.html[aggregations reference guide] is a great starting point if you want to do further experimentation.
|
||||
|
||||
[[getting-started-conclusion]]
|
||||
== Conclusion
|
||||
|
||||
Elasticsearch is both a simple and complex product. We've so far learned the basics of what it is, how to look inside of it, and how to work with it using some of the REST APIs. Hopefully this tutorial has given you a better understanding of what Elasticsearch is and more importantly, inspired you to further experiment with the rest of its great features!
|
||||
|
|
|
@ -74,6 +74,19 @@ relative to when data was written last to a rolled over index.
The previous phase's actions must complete before {ILM} will check `min_age` and
transition into the next phase.

=== Phase Execution

beta[]

The current phase definition of the policy being executed is stored in the
index's metadata. The phase and its actions are compiled into a series of
discrete steps that are executed sequentially. Since some {ILM} actions are
more complex and involve multiple operations against an index, each of these
operations is performed in isolation in a unit called a "step". The
<<ilm-explain-lifecycle,Explain Lifecycle API>> exposes this information, so we
can see which step our index will execute next, or is currently executing.
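For readers who want to inspect those compiled steps for a concrete index, the Explain Lifecycle API referenced above can be called over HTTP. The sketch below is hedged: it assumes a default distribution with {ILM} available, the low-level Java REST client on the classpath, a node on `localhost:9200`, and a placeholder index name `my-index` that is managed by some policy.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ExplainLifecycleSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // "my-index" is a placeholder for an index managed by an ILM policy.
            Request request = new Request("GET", "/my-index/_ilm/explain");
            Response response = client.performRequest(request);
            // The response reports, per index, the current phase, action, and step.
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------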
=== Actions

beta[]
@ -2,6 +2,11 @@
[[breaking_70_indices_changes]]
=== Indices changes

[float]
==== Index creation no longer defaults to five shards
Previous versions of Elasticsearch defaulted to creating five shards per index.
Starting with 7.0.0, the default is now one shard per index.
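Clusters that relied on the old default can keep their previous layout by setting the shard count explicitly when an index is created. A minimal sketch using the low-level Java REST client follows; the index name `my-index`, the chosen counts, and the client/host details are illustrative assumptions, not part of the breaking-changes text itself.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class CreateIndexWithExplicitShards {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Explicitly request five primary shards instead of relying on the (changed) default.
            Request request = new Request("PUT", "/my-index");
            request.setJsonEntity(
                "{\"settings\": {\"index.number_of_shards\": 5, \"index.number_of_replicas\": 1}}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------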
[float]
==== `:` is no longer allowed in index name
@ -165,7 +165,7 @@ PUT _cluster/settings
}
----------------------------
// CONSOLE
// TEST[catch:/cannot set discovery.zen.minimum_master_nodes to more than the current master nodes/]
// TEST[skip:Test use Zen2 now so we can't test Zen1 behaviour here]

TIP: An advantage of splitting the master and data roles between dedicated
nodes is that you can have just three master-eligible nodes and set
@ -38,19 +38,12 @@ public class BytesChannelContext extends SocketChannelContext {

    @Override
    public int read() throws IOException {
        if (channelBuffer.getRemaining() == 0) {
            // Requiring one additional byte will ensure that a new page is allocated.
            channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1);
        }

        int bytesRead = readFromChannel(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex()));
        int bytesRead = readFromChannel(channelBuffer);

        if (bytesRead == 0) {
            return 0;
        }

        channelBuffer.incrementIndex(bytesRead);

        handleReadBytes();

        return bytesRead;

@ -91,8 +84,7 @@ public class BytesChannelContext extends SocketChannelContext {
     * Returns a boolean indicating if the operation was fully flushed.
     */
    private boolean singleFlush(FlushOperation flushOperation) throws IOException {
        int written = flushToChannel(flushOperation.getBuffersToWrite());
        flushOperation.incrementIndex(written);
        flushToChannel(flushOperation);
        return flushOperation.isFullyFlushed();
    }
}
@ -21,6 +21,7 @@ package org.elasticsearch.nio;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ClosedSelectorException;

@ -51,6 +52,7 @@ public class NioSelector implements Closeable {
    private final ConcurrentLinkedQueue<ChannelContext<?>> channelsToRegister = new ConcurrentLinkedQueue<>();
    private final EventHandler eventHandler;
    private final Selector selector;
    private final ByteBuffer ioBuffer;

    private final ReentrantLock runLock = new ReentrantLock();
    private final CountDownLatch exitedLoop = new CountDownLatch(1);

@ -65,6 +67,18 @@ public class NioSelector implements Closeable {
    public NioSelector(EventHandler eventHandler, Selector selector) {
        this.selector = selector;
        this.eventHandler = eventHandler;
        this.ioBuffer = ByteBuffer.allocateDirect(1 << 16);
    }

    /**
     * Returns a cached direct byte buffer for network operations. It is cleared on every get call.
     *
     * @return the byte buffer
     */
    public ByteBuffer getIoBuffer() {
        assertOnSelectorThread();
        ioBuffer.clear();
        return ioBuffer;
    }

    public Selector rawSelector() {
@ -44,7 +44,7 @@ import java.util.function.Predicate;
 */
public abstract class SocketChannelContext extends ChannelContext<SocketChannel> {

    public static final Predicate<NioSocketChannel> ALWAYS_ALLOW_CHANNEL = (c) -> true;
    protected static final Predicate<NioSocketChannel> ALWAYS_ALLOW_CHANNEL = (c) -> true;

    protected final NioSocketChannel channel;
    protected final InboundChannelBuffer channelBuffer;

@ -234,49 +234,113 @@ public abstract class SocketChannelContext extends ChannelContext<SocketChannel>
        return closeNow;
    }

    // When you read or write to a nio socket in java, the heap memory passed down must be copied to/from
    // direct memory. The JVM internally does some buffering of the direct memory, however we can save space
    // by reusing a thread-local direct buffer (provided by the NioSelector).
    //
    // Each network event loop is given a 64kb DirectByteBuffer. When we read we use this buffer and copy the
    // data after the read. When we go to write, we copy the data to the direct memory before calling write.
    // The choice of 64KB is rather arbitrary. We can explore different sizes in the future. However, any
    // data that is copied to the buffer for a write, but not successfully flushed immediately, must be
    // copied again on the next call.

    protected int readFromChannel(ByteBuffer buffer) throws IOException {
        ByteBuffer ioBuffer = getSelector().getIoBuffer();
        ioBuffer.limit(Math.min(buffer.remaining(), ioBuffer.limit()));
        int bytesRead;
        try {
            int bytesRead = rawChannel.read(buffer);
            if (bytesRead < 0) {
                closeNow = true;
                bytesRead = 0;
            }
            return bytesRead;
            bytesRead = rawChannel.read(ioBuffer);
        } catch (IOException e) {
            closeNow = true;
            throw e;
        }
        if (bytesRead < 0) {
            closeNow = true;
            return 0;
        } else {
            ioBuffer.flip();
            buffer.put(ioBuffer);
            return bytesRead;
        }
    }

    protected int readFromChannel(ByteBuffer[] buffers) throws IOException {
    protected int readFromChannel(InboundChannelBuffer channelBuffer) throws IOException {
        ByteBuffer ioBuffer = getSelector().getIoBuffer();
        int bytesRead;
        try {
            int bytesRead = (int) rawChannel.read(buffers);
            if (bytesRead < 0) {
                closeNow = true;
                bytesRead = 0;
            }
            return bytesRead;
            bytesRead = rawChannel.read(ioBuffer);
        } catch (IOException e) {
            closeNow = true;
            throw e;
        }
        if (bytesRead < 0) {
            closeNow = true;
            return 0;
        } else {
            ioBuffer.flip();
            channelBuffer.ensureCapacity(channelBuffer.getIndex() + ioBuffer.remaining());
            ByteBuffer[] buffers = channelBuffer.sliceBuffersFrom(channelBuffer.getIndex());
            int j = 0;
            while (j < buffers.length && ioBuffer.remaining() > 0) {
                ByteBuffer buffer = buffers[j++];
                copyBytes(ioBuffer, buffer);
            }
            channelBuffer.incrementIndex(bytesRead);
            return bytesRead;
        }
    }

    protected int flushToChannel(ByteBuffer buffer) throws IOException {
        int initialPosition = buffer.position();
        ByteBuffer ioBuffer = getSelector().getIoBuffer();
        copyBytes(buffer, ioBuffer);
        ioBuffer.flip();
        int bytesWritten;
        try {
            return rawChannel.write(buffer);
            bytesWritten = rawChannel.write(ioBuffer);
        } catch (IOException e) {
            closeNow = true;
            buffer.position(initialPosition);
            throw e;
        }
        buffer.position(initialPosition + bytesWritten);
        return bytesWritten;
    }

    protected int flushToChannel(ByteBuffer[] buffers) throws IOException {
        try {
            return (int) rawChannel.write(buffers);
        } catch (IOException e) {
            closeNow = true;
            throw e;
    protected int flushToChannel(FlushOperation flushOperation) throws IOException {
        ByteBuffer ioBuffer = getSelector().getIoBuffer();

        boolean continueFlush = flushOperation.isFullyFlushed() == false;
        int totalBytesFlushed = 0;
        while (continueFlush) {
            ioBuffer.clear();
            int j = 0;
            ByteBuffer[] buffers = flushOperation.getBuffersToWrite();
            while (j < buffers.length && ioBuffer.remaining() > 0) {
                ByteBuffer buffer = buffers[j++];
                copyBytes(buffer, ioBuffer);
            }
            ioBuffer.flip();
            int bytesFlushed;
            try {
                bytesFlushed = rawChannel.write(ioBuffer);
            } catch (IOException e) {
                closeNow = true;
                throw e;
            }
            flushOperation.incrementIndex(bytesFlushed);
            totalBytesFlushed += bytesFlushed;
            continueFlush = ioBuffer.hasRemaining() == false && flushOperation.isFullyFlushed() == false;
        }
        return totalBytesFlushed;
    }

    private void copyBytes(ByteBuffer from, ByteBuffer to) {
        int nBytesToCopy = Math.min(to.remaining(), from.remaining());
        int initialLimit = from.limit();
        from.limit(from.position() + nBytesToCopy);
        to.put(from);
        from.limit(initialLimit);
    }
}
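To make the contract of the `copyBytes` helper above easier to see in isolation, here is a small self-contained sketch (plain `java.nio`, nothing Elasticsearch-specific): it copies `min(from.remaining(), to.remaining())` bytes, advances both positions, and restores the source limit so the remaining bytes can be copied on a later call, which is exactly the behaviour the flush loop relies on.

[source,java]
--------------------------------------------------
import java.nio.ByteBuffer;

public class BoundedCopyDemo {
    // Mirrors the copyBytes(...) helper above: copy as much as the destination can take,
    // advance both positions, and leave the source limit untouched for later copies.
    static void copyBytes(ByteBuffer from, ByteBuffer to) {
        int nBytesToCopy = Math.min(to.remaining(), from.remaining());
        int initialLimit = from.limit();
        from.limit(from.position() + nBytesToCopy);
        to.put(from);
        from.limit(initialLimit);
    }

    public static void main(String[] args) {
        ByteBuffer from = ByteBuffer.allocate(64);
        for (int i = 0; i < 64; i++) {
            from.put((byte) i);
        }
        from.flip(); // 64 readable bytes

        ByteBuffer to = ByteBuffer.allocateDirect(16); // small destination, e.g. a shared io buffer
        copyBytes(from, to);

        System.out.println(from.remaining()); // 48 bytes still pending for a later call
        System.out.println(to.position());    // 16 bytes copied
    }
}
--------------------------------------------------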
|
@ -31,7 +31,7 @@ import java.util.function.BiConsumer;
|
|||
import java.util.function.Consumer;
|
||||
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyInt;
|
||||
import static org.mockito.Matchers.eq;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
@ -64,14 +64,19 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
context = new BytesChannelContext(channel, selector, mock(Consumer.class), handler, channelBuffer);
|
||||
|
||||
when(selector.isOnCurrentThread()).thenReturn(true);
|
||||
ByteBuffer buffer = ByteBuffer.allocate(1 << 14);
|
||||
when(selector.getIoBuffer()).thenAnswer(invocationOnMock -> {
|
||||
buffer.clear();
|
||||
return buffer;
|
||||
});
|
||||
}
|
||||
|
||||
public void testSuccessfulRead() throws IOException {
|
||||
byte[] bytes = createMessage(messageLength);
|
||||
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenAnswer(invocationOnMock -> {
|
||||
ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
|
||||
buffers[0].put(bytes);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> {
|
||||
ByteBuffer buffer = (ByteBuffer) invocationOnMock.getArguments()[0];
|
||||
buffer.put(bytes);
|
||||
return bytes.length;
|
||||
});
|
||||
|
||||
|
@ -87,9 +92,9 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
public void testMultipleReadsConsumed() throws IOException {
|
||||
byte[] bytes = createMessage(messageLength * 2);
|
||||
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenAnswer(invocationOnMock -> {
|
||||
ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
|
||||
buffers[0].put(bytes);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> {
|
||||
ByteBuffer buffer = (ByteBuffer) invocationOnMock.getArguments()[0];
|
||||
buffer.put(bytes);
|
||||
return bytes.length;
|
||||
});
|
||||
|
||||
|
@ -105,9 +110,9 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
public void testPartialRead() throws IOException {
|
||||
byte[] bytes = createMessage(messageLength);
|
||||
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenAnswer(invocationOnMock -> {
|
||||
ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
|
||||
buffers[0].put(bytes);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> {
|
||||
ByteBuffer buffer = (ByteBuffer) invocationOnMock.getArguments()[0];
|
||||
buffer.put(bytes);
|
||||
return bytes.length;
|
||||
});
|
||||
|
||||
|
@ -130,14 +135,14 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
|
||||
public void testReadThrowsIOException() throws IOException {
|
||||
IOException ioException = new IOException();
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenThrow(ioException);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenThrow(ioException);
|
||||
|
||||
IOException ex = expectThrows(IOException.class, () -> context.read());
|
||||
assertSame(ioException, ex);
|
||||
}
|
||||
|
||||
public void testReadThrowsIOExceptionMeansReadyForClose() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenThrow(new IOException());
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenThrow(new IOException());
|
||||
|
||||
assertFalse(context.selectorShouldClose());
|
||||
expectThrows(IOException.class, () -> context.read());
|
||||
|
@ -145,7 +150,7 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testReadLessThanZeroMeansReadyForClose() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenReturn(-1L);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenReturn(-1);
|
||||
|
||||
assertEquals(0, context.read());
|
||||
|
||||
|
@ -164,11 +169,13 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
assertTrue(context.readyForFlush());
|
||||
|
||||
when(flushOperation.getBuffersToWrite()).thenReturn(buffers);
|
||||
when(flushOperation.isFullyFlushed()).thenReturn(true);
|
||||
when(flushOperation.isFullyFlushed()).thenReturn(false, true);
|
||||
when(flushOperation.getListener()).thenReturn(listener);
|
||||
context.flushChannel();
|
||||
|
||||
verify(rawChannel).write(buffers, 0, buffers.length);
|
||||
ByteBuffer buffer = buffers[0].duplicate();
|
||||
buffer.flip();
|
||||
verify(rawChannel).write(eq(buffer));
|
||||
verify(selector).executeListener(listener, null);
|
||||
assertFalse(context.readyForFlush());
|
||||
}
|
||||
|
@ -180,7 +187,7 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
assertTrue(context.readyForFlush());
|
||||
|
||||
when(flushOperation.isFullyFlushed()).thenReturn(false);
|
||||
when(flushOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[0]);
|
||||
when(flushOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
|
||||
context.flushChannel();
|
||||
|
||||
verify(listener, times(0)).accept(null, null);
|
||||
|
@ -194,8 +201,8 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
BiConsumer<Void, Exception> listener2 = mock(BiConsumer.class);
|
||||
FlushReadyWrite flushOperation1 = mock(FlushReadyWrite.class);
|
||||
FlushReadyWrite flushOperation2 = mock(FlushReadyWrite.class);
|
||||
when(flushOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[0]);
|
||||
when(flushOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[0]);
|
||||
when(flushOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
|
||||
when(flushOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[] {ByteBuffer.allocate(3)});
|
||||
when(flushOperation1.getListener()).thenReturn(listener);
|
||||
when(flushOperation2.getListener()).thenReturn(listener2);
|
||||
|
||||
|
@ -204,7 +211,7 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
|
||||
assertTrue(context.readyForFlush());
|
||||
|
||||
when(flushOperation1.isFullyFlushed()).thenReturn(true);
|
||||
when(flushOperation1.isFullyFlushed()).thenReturn(false, true);
|
||||
when(flushOperation2.isFullyFlushed()).thenReturn(false);
|
||||
context.flushChannel();
|
||||
|
||||
|
@ -212,7 +219,7 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
verify(listener2, times(0)).accept(null, null);
|
||||
assertTrue(context.readyForFlush());
|
||||
|
||||
when(flushOperation2.isFullyFlushed()).thenReturn(true);
|
||||
when(flushOperation2.isFullyFlushed()).thenReturn(false, true);
|
||||
|
||||
context.flushChannel();
|
||||
|
||||
|
@ -231,7 +238,7 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
|
||||
IOException exception = new IOException();
|
||||
when(flushOperation.getBuffersToWrite()).thenReturn(buffers);
|
||||
when(rawChannel.write(buffers, 0, buffers.length)).thenThrow(exception);
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenThrow(exception);
|
||||
when(flushOperation.getListener()).thenReturn(listener);
|
||||
expectThrows(IOException.class, () -> context.flushChannel());
|
||||
|
||||
|
@ -246,7 +253,7 @@ public class BytesChannelContextTests extends ESTestCase {
|
|||
|
||||
IOException exception = new IOException();
|
||||
when(flushOperation.getBuffersToWrite()).thenReturn(buffers);
|
||||
when(rawChannel.write(buffers, 0, buffers.length)).thenThrow(exception);
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenThrow(exception);
|
||||
|
||||
assertFalse(context.selectorShouldClose());
|
||||
expectThrows(IOException.class, () -> context.flushChannel());
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.nio;
|
|||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Before;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
@ -54,6 +55,7 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
private BiConsumer<Void, Exception> listener;
|
||||
private NioSelector selector;
|
||||
private ReadWriteHandler readWriteHandler;
|
||||
private ByteBuffer ioBuffer = ByteBuffer.allocate(1024);
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Before
|
||||
|
@ -71,6 +73,10 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer);
|
||||
|
||||
when(selector.isOnCurrentThread()).thenReturn(true);
|
||||
when(selector.getIoBuffer()).thenAnswer(invocationOnMock -> {
|
||||
ioBuffer.clear();
|
||||
return ioBuffer;
|
||||
});
|
||||
}
|
||||
|
||||
public void testIOExceptionSetIfEncountered() throws IOException {
|
||||
|
@ -90,7 +96,6 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testSignalWhenPeerClosed() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer[].class), anyInt(), anyInt())).thenReturn(-1L);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenReturn(-1);
|
||||
assertFalse(context.closeNow());
|
||||
context.read();
|
||||
|
@ -289,6 +294,153 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testReadToBufferLimitsToPassedBuffer() throws IOException {
|
||||
ByteBuffer buffer = ByteBuffer.allocate(10);
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(completelyFillBufferAnswer());
|
||||
|
||||
int bytesRead = context.readFromChannel(buffer);
|
||||
assertEquals(bytesRead, 10);
|
||||
assertEquals(0, buffer.remaining());
|
||||
}
|
||||
|
||||
public void testReadToBufferHandlesIOException() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenThrow(new IOException());
|
||||
|
||||
expectThrows(IOException.class, () -> context.readFromChannel(ByteBuffer.allocate(10)));
|
||||
assertTrue(context.closeNow());
|
||||
}
|
||||
|
||||
public void testReadToBufferHandlesEOF() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenReturn(-1);
|
||||
|
||||
context.readFromChannel(ByteBuffer.allocate(10));
|
||||
assertTrue(context.closeNow());
|
||||
}
|
||||
|
||||
public void testReadToChannelBufferWillReadAsMuchAsIOBufferAllows() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(completelyFillBufferAnswer());
|
||||
|
||||
InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance();
|
||||
int bytesRead = context.readFromChannel(channelBuffer);
|
||||
assertEquals(ioBuffer.capacity(), bytesRead);
|
||||
assertEquals(ioBuffer.capacity(), channelBuffer.getIndex());
|
||||
}
|
||||
|
||||
public void testReadToChannelBufferHandlesIOException() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenThrow(new IOException());
|
||||
|
||||
InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance();
|
||||
expectThrows(IOException.class, () -> context.readFromChannel(channelBuffer));
|
||||
assertTrue(context.closeNow());
|
||||
assertEquals(0, channelBuffer.getIndex());
|
||||
}
|
||||
|
||||
public void testReadToChannelBufferHandlesEOF() throws IOException {
|
||||
when(rawChannel.read(any(ByteBuffer.class))).thenReturn(-1);
|
||||
|
||||
InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance();
|
||||
context.readFromChannel(channelBuffer);
|
||||
assertTrue(context.closeNow());
|
||||
assertEquals(0, channelBuffer.getIndex());
|
||||
}
|
||||
|
||||
public void testFlushBufferHandlesPartialFlush() throws IOException {
|
||||
int bytesToConsume = 3;
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(consumeBufferAnswer(bytesToConsume));
|
||||
|
||||
ByteBuffer buffer = ByteBuffer.allocate(10);
|
||||
context.flushToChannel(buffer);
|
||||
assertEquals(10 - bytesToConsume, buffer.remaining());
|
||||
}
|
||||
|
||||
public void testFlushBufferHandlesFullFlush() throws IOException {
|
||||
int bytesToConsume = 10;
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(consumeBufferAnswer(bytesToConsume));
|
||||
|
||||
ByteBuffer buffer = ByteBuffer.allocate(10);
|
||||
context.flushToChannel(buffer);
|
||||
assertEquals(0, buffer.remaining());
|
||||
}
|
||||
|
||||
public void testFlushBufferHandlesIOException() throws IOException {
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenThrow(new IOException());
|
||||
|
||||
ByteBuffer buffer = ByteBuffer.allocate(10);
|
||||
expectThrows(IOException.class, () -> context.flushToChannel(buffer));
|
||||
assertTrue(context.closeNow());
|
||||
assertEquals(10, buffer.remaining());
|
||||
}
|
||||
|
||||
public void testFlushBuffersHandlesZeroFlush() throws IOException {
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(consumeBufferAnswer(0));
|
||||
|
||||
ByteBuffer[] buffers = {ByteBuffer.allocate(1023), ByteBuffer.allocate(1023)};
|
||||
FlushOperation flushOperation = new FlushOperation(buffers, listener);
|
||||
context.flushToChannel(flushOperation);
|
||||
assertEquals(2, flushOperation.getBuffersToWrite().length);
|
||||
assertEquals(0, flushOperation.getBuffersToWrite()[0].position());
|
||||
}
|
||||
|
||||
public void testFlushBuffersHandlesPartialFlush() throws IOException {
|
||||
AtomicBoolean first = new AtomicBoolean(true);
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> {
|
||||
if (first.compareAndSet(true, false)) {
|
||||
return consumeBufferAnswer(1024).answer(invocationOnMock);
|
||||
} else {
|
||||
return consumeBufferAnswer(3).answer(invocationOnMock);
|
||||
}
|
||||
});
|
||||
|
||||
ByteBuffer[] buffers = {ByteBuffer.allocate(1023), ByteBuffer.allocate(1023)};
|
||||
FlushOperation flushOperation = new FlushOperation(buffers, listener);
|
||||
context.flushToChannel(flushOperation);
|
||||
assertEquals(1, flushOperation.getBuffersToWrite().length);
|
||||
assertEquals(4, flushOperation.getBuffersToWrite()[0].position());
|
||||
}
|
||||
|
||||
public void testFlushBuffersHandlesFullFlush() throws IOException {
|
||||
AtomicBoolean first = new AtomicBoolean(true);
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> {
|
||||
if (first.compareAndSet(true, false)) {
|
||||
return consumeBufferAnswer(1024).answer(invocationOnMock);
|
||||
} else {
|
||||
return consumeBufferAnswer(1022).answer(invocationOnMock);
|
||||
}
|
||||
});
|
||||
|
||||
ByteBuffer[] buffers = {ByteBuffer.allocate(1023), ByteBuffer.allocate(1023)};
|
||||
FlushOperation flushOperation = new FlushOperation(buffers, listener);
|
||||
context.flushToChannel(flushOperation);
|
||||
assertTrue(flushOperation.isFullyFlushed());
|
||||
}
|
||||
|
||||
public void testFlushBuffersHandlesIOException() throws IOException {
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenThrow(new IOException());
|
||||
|
||||
ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(10)};
|
||||
FlushOperation flushOperation = new FlushOperation(buffers, listener);
|
||||
expectThrows(IOException.class, () -> context.flushToChannel(flushOperation));
|
||||
assertTrue(context.closeNow());
|
||||
}
|
||||
|
||||
public void testFlushBuffersHandlesIOExceptionSecondTimeThroughLoop() throws IOException {
|
||||
AtomicBoolean first = new AtomicBoolean(true);
|
||||
when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> {
|
||||
if (first.compareAndSet(true, false)) {
|
||||
return consumeBufferAnswer(1024).answer(invocationOnMock);
|
||||
} else {
|
||||
throw new IOException();
|
||||
}
|
||||
});
|
||||
|
||||
ByteBuffer[] buffers = {ByteBuffer.allocate(1023), ByteBuffer.allocate(1023)};
|
||||
FlushOperation flushOperation = new FlushOperation(buffers, listener);
|
||||
expectThrows(IOException.class, () -> context.flushToChannel(flushOperation));
|
||||
assertTrue(context.closeNow());
|
||||
assertEquals(1, flushOperation.getBuffersToWrite().length);
|
||||
assertEquals(1, flushOperation.getBuffersToWrite()[0].position());
|
||||
}
|
||||
|
||||
private static class TestSocketChannelContext extends SocketChannelContext {
|
||||
|
||||
private TestSocketChannelContext(NioSocketChannel channel, NioSelector selector, Consumer<Exception> exceptionHandler,
|
||||
|
@ -305,8 +457,8 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
@Override
|
||||
public int read() throws IOException {
|
||||
if (randomBoolean()) {
|
||||
ByteBuffer[] byteBuffers = {ByteBuffer.allocate(10)};
|
||||
return readFromChannel(byteBuffers);
|
||||
InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance();
|
||||
return readFromChannel(channelBuffer);
|
||||
} else {
|
||||
return readFromChannel(ByteBuffer.allocate(10));
|
||||
}
|
||||
|
@ -316,7 +468,7 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
public void flushChannel() throws IOException {
|
||||
if (randomBoolean()) {
|
||||
ByteBuffer[] byteBuffers = {ByteBuffer.allocate(10)};
|
||||
flushToChannel(byteBuffers);
|
||||
flushToChannel(new FlushOperation(byteBuffers, (v, e) -> {}));
|
||||
} else {
|
||||
flushToChannel(ByteBuffer.allocate(10));
|
||||
}
|
||||
|
@ -345,4 +497,23 @@ public class SocketChannelContextTests extends ESTestCase {
|
|||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
private Answer<Integer> completelyFillBufferAnswer() {
|
||||
return invocationOnMock -> {
|
||||
ByteBuffer b = (ByteBuffer) invocationOnMock.getArguments()[0];
|
||||
int bytesRead = b.remaining();
|
||||
while (b.hasRemaining()) {
|
||||
b.put((byte) 1);
|
||||
}
|
||||
return bytesRead;
|
||||
};
|
||||
}
|
||||
|
||||
private Answer<Object> consumeBufferAnswer(int bytesToConsume) {
|
||||
return invocationOnMock -> {
|
||||
ByteBuffer b = (ByteBuffer) invocationOnMock.getArguments()[0];
|
||||
b.position(b.position() + bytesToConsume);
|
||||
return bytesToConsume;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1 @@
7c72e9fe3c987151d04a12025b41465f5b21ba00
@ -1 +0,0 @@
4a1574a3d3fcb950b440e36b3035f90885794bbf
@ -0,0 +1 @@
a8681d864406ad5b50076157d800320c95a17e01
@ -1 +0,0 @@
428b4a9e84b4e903dfadb4dd1e1ef2cdd98cce08
@ -0,0 +1 @@
4e02f20bf005f8aa0d3e182e5446736557d35723
@ -1 +0,0 @@
d08ee1049d04f672175ea9ba3132f7eaa98d9742
@ -0,0 +1 @@
fd8de205837d3177d2f8ff454e74badf8a7c20a1
@ -1 +0,0 @@
841a9bd3a0e12b15b700c0655a76e4035d3128ae
@ -0,0 +1 @@
b32e8b29e9533d7e7f832a093d68b294c8f43bee
@ -1 +0,0 @@
e9bfd4935d1a5d55154cb99a066a03797174bc33
@ -0,0 +1 @@
03f342981ccdb176bb34e95bbce8d5ff7b00a711
@ -1 +0,0 @@
6a933a5113a708229177463c94d53ea544414a53
@ -0,0 +1 @@
67c740d5304a22b062acc54b4fcf5314186c013e
@ -1 +0,0 @@
7709b470601b0c1a77fdcd5dd9ce9f48aba3db78
@ -0,0 +1 @@
ab4cf17ec2d41ae62e249faba5ee584d75e23131
@ -1 +0,0 @@
00062c609614d7229c5869d7d8988674ffaea350
|
|
@ -13,6 +13,19 @@ setup:
|
|||
client: "integration_test"
|
||||
base_path: "${base_path}"
|
||||
|
||||
# Remove the snapshots, if a previous test failed to delete them. This is
|
||||
# useful for third party tests that runs the test against a real external service.
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: repository
|
||||
snapshot: snapshot-one
|
||||
ignore: 404
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: repository
|
||||
snapshot: snapshot-two
|
||||
ignore: 404
|
||||
|
||||
---
|
||||
"Snapshot/Restore with repository-gcs":
|
||||
|
||||
|
|
|
@ -74,6 +74,7 @@ task testRepositoryCreds(type: RandomizedTestingTask) {
|
|||
include '**/S3BlobStoreRepositoryTests.class'
|
||||
systemProperty 'es.allow_insecure_settings', 'true'
|
||||
}
|
||||
project.check.dependsOn(testRepositoryCreds)
|
||||
|
||||
test {
|
||||
// these are tested explicitly in separate test tasks
|
||||
|
@ -106,20 +107,12 @@ String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs")
|
|||
// If all these variables are missing then we are testing against the internal fixture instead, which has the following
|
||||
// credentials hard-coded in.
|
||||
|
||||
if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath
|
||||
&& !s3EC2Bucket && !s3EC2BasePath
|
||||
&& !s3ECSBucket && !s3ECSBasePath) {
|
||||
if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath) {
|
||||
s3PermanentAccessKey = 's3_integration_test_permanent_access_key'
|
||||
s3PermanentSecretKey = 's3_integration_test_permanent_secret_key'
|
||||
s3PermanentBucket = 'permanent-bucket-test'
|
||||
s3PermanentBasePath = 'integration_test'
|
||||
|
||||
s3EC2Bucket = 'ec2-bucket-test'
|
||||
s3EC2BasePath = 'integration_test'
|
||||
|
||||
s3ECSBucket = 'ecs-bucket-test'
|
||||
s3ECSBasePath = 'integration_test'
|
||||
|
||||
useFixture = true
|
||||
|
||||
} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) {
|
||||
|
@ -137,6 +130,16 @@ if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3T
|
|||
throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present")
|
||||
}
|
||||
|
||||
if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) {
|
||||
s3EC2Bucket = 'ec2-bucket-test'
|
||||
s3EC2BasePath = 'integration_test'
|
||||
s3ECSBucket = 'ecs-bucket-test'
|
||||
s3ECSBasePath = 'integration_test'
|
||||
} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath) {
|
||||
throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present")
|
||||
}
|
||||
|
||||
|
||||
final String minioVersion = 'RELEASE.2018-06-22T23-48-46Z'
|
||||
final String minioBinDir = "${buildDir}/minio/bin"
|
||||
final String minioDataDir = "${buildDir}/minio/data"
|
||||
|
|
|
@ -16,6 +16,19 @@ setup:
|
|||
canned_acl: private
|
||||
storage_class: standard
|
||||
|
||||
# Remove the snapshots, if a previous test failed to delete them. This is
|
||||
# useful for third party tests that runs the test against a real external service.
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: repository_permanent
|
||||
snapshot: snapshot-one
|
||||
ignore: 404
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: repository_permanent
|
||||
snapshot: snapshot-two
|
||||
ignore: 404
|
||||
|
||||
---
|
||||
"Snapshot and Restore with repository-s3 using permanent credentials":
|
||||
|
||||
|
|
|
@ -953,6 +953,56 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testSoftDeletes() throws Exception {
|
||||
if (isRunningAgainstOldCluster()) {
|
||||
XContentBuilder mappingsAndSettings = jsonBuilder();
|
||||
mappingsAndSettings.startObject();
|
||||
{
|
||||
mappingsAndSettings.startObject("settings");
|
||||
mappingsAndSettings.field("number_of_shards", 1);
|
||||
mappingsAndSettings.field("number_of_replicas", 1);
|
||||
if (getOldClusterVersion().onOrAfter(Version.V_6_5_0)) {
|
||||
mappingsAndSettings.field("soft_deletes.enabled", true);
|
||||
}
|
||||
mappingsAndSettings.endObject();
|
||||
}
|
||||
mappingsAndSettings.endObject();
|
||||
Request createIndex = new Request("PUT", "/" + index);
|
||||
createIndex.setJsonEntity(Strings.toString(mappingsAndSettings));
|
||||
client().performRequest(createIndex);
|
||||
int numDocs = between(10, 100);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject());
|
||||
Request request = new Request("POST", "/" + index + "/doc/" + i);
|
||||
request.setJsonEntity(doc);
|
||||
client().performRequest(request);
|
||||
if (rarely()) {
|
||||
refresh();
|
||||
}
|
||||
}
|
||||
client().performRequest(new Request("POST", "/" + index + "/_flush"));
|
||||
int liveDocs = numDocs;
|
||||
assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
if (randomBoolean()) {
|
||||
String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject());
|
||||
Request request = new Request("POST", "/" + index + "/doc/" + i);
|
||||
request.setJsonEntity(doc);
|
||||
client().performRequest(request);
|
||||
} else if (randomBoolean()) {
|
||||
client().performRequest(new Request("DELETE", "/" + index + "/doc/" + i));
|
||||
liveDocs--;
|
||||
}
|
||||
}
|
||||
refresh();
|
||||
assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
|
||||
saveInfoDocument("doc_count", Integer.toString(liveDocs));
|
||||
} else {
|
||||
int liveDocs = Integer.parseInt(loadInfoDocument("doc_count"));
|
||||
assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
|
||||
}
|
||||
}
|
||||
|
||||
private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException {
|
||||
// Check the snapshot metadata, especially the version
|
||||
Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName);
|
||||
|
|
|
@ -83,6 +83,8 @@ for (Version version : bwcVersions.wireCompatible) {
|
|||
dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
|
||||
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
|
||||
setting 'node.name', "upgraded-node-${stopNode}"
|
||||
// TODO: Move to Zen2 once we support rolling upgrade with Zen2
|
||||
setting 'discovery.type', 'zen'
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.client.ResponseException;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.test.rest.yaml.ObjectPath;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -272,4 +273,35 @@ public class RecoveryIT extends AbstractRollingTestCase {
|
|||
ensureGreen(index);
|
||||
}
|
||||
|
||||
public void testRecoveryWithSoftDeletes() throws Exception {
|
||||
final String index = "recover_with_soft_deletes";
|
||||
if (CLUSTER_TYPE == ClusterType.OLD) {
|
||||
Settings.Builder settings = Settings.builder()
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
|
||||
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
|
||||
// if the node with the replica is the first to be restarted, while a replica is still recovering
|
||||
// then delayed allocation will kick in. When the node comes back, the master will search for a copy
|
||||
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
|
||||
// before timing out
|
||||
.put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
|
||||
.put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
|
||||
if (getNodeId(v -> v.onOrAfter(Version.V_6_5_0)) != null) {
|
||||
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true);
|
||||
}
|
||||
createIndex(index, settings.build());
|
||||
int numDocs = randomInt(10);
|
||||
indexDocs(index, 0, numDocs);
|
||||
if (randomBoolean()) {
|
||||
client().performRequest(new Request("POST", "/" + index + "/_flush"));
|
||||
}
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
if (randomBoolean()) {
|
||||
indexDocs(index, i, 1); // update
|
||||
} else if (randomBoolean()) {
|
||||
client().performRequest(new Request("DELETE", index + "/test/" + i));
|
||||
}
|
||||
}
|
||||
}
|
||||
ensureGreen(index);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,6 +22,8 @@ apply plugin: 'elasticsearch.rest-test'
|
|||
|
||||
integTestCluster {
|
||||
setting 'node.name', null
|
||||
// TODO: Run this using zen2
|
||||
setting 'discovery.type', 'zen'
|
||||
}
|
||||
|
||||
integTestRunner {
|
||||
|
|
|
@ -3,8 +3,9 @@
|
|||
"documentation" : "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html",
|
||||
"methods" : ["GET", "POST"],
|
||||
"url" : {
|
||||
"path" : "/{index}/{type}/_termvectors",
|
||||
"paths" : ["/{index}/{type}/_termvectors", "/{index}/{type}/{id}/_termvectors"],
|
||||
"path" : "/{index}/_termvectors/{id}",
|
||||
"paths" : ["/{index}/_termvectors/{id}", "/{index}/_termvectors/",
|
||||
"/{index}/{type}/{id}/_termvectors", "/{index}/{type}/_termvectors"],
|
||||
"parts" : {
|
||||
"index" : {
|
||||
"type" : "string",
|
||||
|
@ -14,7 +15,7 @@
|
|||
"type" : {
|
||||
"type" : "string",
|
||||
"description" : "The type of the document.",
|
||||
"required" : true
|
||||
"required" : false
|
||||
},
|
||||
"id" : {
|
||||
"type" : "string",
|
||||
|
|
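The new typeless path in this spec can be exercised directly from Java with the low-level REST client. This is only a sketch under stated assumptions (client on the classpath, a node on `localhost:9200`, a `twitter` index containing document `1` with a `message` field); it simply issues `GET /{index}/_termvectors/{id}` with a `fields` parameter.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class TypelessTermVectorsSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // New typeless path from the spec: /{index}/_termvectors/{id}
            Request request = new Request("GET", "/twitter/_termvectors/1");
            request.addParameter("fields", "message");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------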
|
@ -1,10 +1,13 @@
|
|||
setup:
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: types are required in requests before 7.0.0
|
||||
- do:
|
||||
indices.create:
|
||||
index: testidx
|
||||
body:
|
||||
mappings:
|
||||
testtype:
|
||||
_doc:
|
||||
properties:
|
||||
text:
|
||||
type : "text"
|
||||
|
@ -12,7 +15,7 @@ setup:
|
|||
- do:
|
||||
index:
|
||||
index: testidx
|
||||
type: testtype
|
||||
type: _doc
|
||||
id: testing_document
|
||||
body: {"text" : "The quick brown fox is brown."}
|
||||
|
||||
|
@ -22,19 +25,6 @@ setup:
|
|||
---
|
||||
"Basic tests for multi termvector get":
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_index" : "testidx"
|
||||
"_type" : "testtype"
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
|
@ -42,7 +32,6 @@ setup:
|
|||
"docs":
|
||||
-
|
||||
"_index" : "testidx"
|
||||
"_type" : "testtype"
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
|
@ -55,7 +44,6 @@ setup:
|
|||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_type" : "testtype"
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
|
@ -65,20 +53,6 @@ setup:
|
|||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"index" : "testidx"
|
||||
"type" : "testtype"
|
||||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"index" : "testidx"
|
||||
"type" : "testtype"
|
||||
"ids" : ["testing_document"]
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
|
|
|
@ -0,0 +1,85 @@
|
|||
setup:
|
||||
- do:
|
||||
indices.create:
|
||||
index: testidx
|
||||
body:
|
||||
mappings:
|
||||
testtype:
|
||||
properties:
|
||||
text:
|
||||
type : "text"
|
||||
term_vector : "with_positions_offsets"
|
||||
- do:
|
||||
index:
|
||||
index: testidx
|
||||
type: testtype
|
||||
id: testing_document
|
||||
body: {"text" : "The quick brown fox is brown."}
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
---
|
||||
"Basic tests for multi termvector get":
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_index" : "testidx"
|
||||
"_type" : "testtype"
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_index" : "testidx"
|
||||
"_type" : "testtype"
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"index" : "testidx"
|
||||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_type" : "testtype"
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"index" : "testidx"
|
||||
"type" : "testtype"
|
||||
"body" :
|
||||
"docs":
|
||||
-
|
||||
"_id" : "testing_document"
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
"term_statistics" : true
|
||||
"index" : "testidx"
|
||||
"type" : "testtype"
|
||||
"ids" : ["testing_document"]
|
||||
|
||||
- match: {docs.0.term_vectors.text.terms.brown.term_freq: 2}
|
||||
- match: {docs.0.term_vectors.text.terms.brown.ttf: 2}
|
|
@@ -1,3 +1,7 @@
setup:
  - skip:
      version: " - 6.99.99"
      reason: types are required in requests before 7.0.0

---
"Deprecated camel case and _ parameters should fail in Term Vectors query":

@@ -12,7 +16,7 @@
        index: testidx
        body:
          mappings:
            testtype:
            _doc:
              properties:
                text:
                  type : "text"

@@ -21,7 +25,7 @@
  - do:
      index:
        index: testidx
        type: testtype
        type: _doc
        id: testing_document
        body: {"text" : "The quick brown fox is brown."}

@@ -33,7 +37,6 @@
          "docs":
            -
              "_index" : "testidx"
              "_type" : "testtype"
              "_id" : "testing_document"
              "version" : 1
              "versionType" : "external"

@@ -46,7 +49,7 @@
          "docs":
            -
              "_index" : "testidx"
              "_type" : "testtype"
              "_type" : "_doc"
              "_id" : "testing_document"
              "version" : 1
              "_version_type" : "external"
@@ -0,0 +1,52 @@

---
"Deprecated camel case and _ parameters should fail in Term Vectors query":

  - skip:
      version: " - 6.99.99"
      reason: camel case and _ parameters (e.g. versionType, _version_type) should fail from 7.0
      features: "warnings"

  - do:
      indices.create:
        index: testidx
        body:
          mappings:
            testtype:
              properties:
                text:
                  type : "text"
                  term_vector : "with_positions_offsets"

  - do:
      index:
        index: testidx
        type: testtype
        id: testing_document
        body: {"text" : "The quick brown fox is brown."}

  - do:
      catch: bad_request
      mtermvectors:
        "term_statistics" : true
        "body" :
          "docs":
            -
              "_index" : "testidx"
              "_type" : "testtype"
              "_id" : "testing_document"
              "version" : 1
              "versionType" : "external"

  - do:
      catch: bad_request
      mtermvectors:
        "term_statistics" : true
        "body" :
          "docs":
            -
              "_index" : "testidx"
              "_type" : "testtype"
              "_id" : "testing_document"
              "version" : 1
              "_version_type" : "external"
@@ -1,10 +1,14 @@
setup:
  - skip:
      version: " - 6.99.99"
      reason: types are required in requests before 7.0.0

  - do:
      indices.create:
        index: testidx
        body:
          mappings:
            testtype:
            _doc:
              "properties":
                "text":
                  "type" : "text"

@@ -12,7 +16,7 @@ setup:
  - do:
      index:
        index: testidx
        type: testtype
        type: _doc
        id: testing_document
        body:
          "text" : "The quick brown fox is brown."

@@ -25,7 +29,6 @@ setup:
  - do:
      termvectors:
        index: testidx
        type: testtype
        id: testing_document
        "term_statistics" : true

@@ -0,0 +1,35 @@
setup:
  - do:
      indices.create:
        index: testidx
        body:
          mappings:
            testtype:
              "properties":
                "text":
                  "type" : "text"
                  "term_vector" : "with_positions_offsets"
  - do:
      index:
        index: testidx
        type: testtype
        id: testing_document
        body:
          "text" : "The quick brown fox is brown."
  - do:
      indices.refresh: {}

---
"Basic tests for termvector get":

  - do:
      termvectors:
        index: testidx
        type: testtype
        id: testing_document
        "term_statistics" : true

  - match: {term_vectors.text.field_statistics.sum_doc_freq: 5}
  - match: {term_vectors.text.terms.brown.doc_freq: 1}
  - match: {term_vectors.text.terms.brown.tokens.0.start_offset: 10}
@@ -1,3 +1,9 @@
setup:
  - skip:
      version: " - 6.99.99"
      reason: types are required in requests before 7.0.0

---
"Term vector API should return 'found: false' for docs between index and refresh":
  - do:
      indices.create:

@@ -10,7 +16,7 @@
            number_of_replicas: 0
            refresh_interval: -1
          mappings:
            doc:
            _doc:
              properties:
                text:
                  type : "text"

@@ -23,7 +29,7 @@
  - do:
      index:
        index: testidx
        type: doc
        type: _doc
        id: 1
        body:
          text : "foo bar"

@@ -31,11 +37,10 @@
  - do:
      termvectors:
        index: testidx
        type: doc
        id: 1
        realtime: false

  - match: { _index: "testidx" }
  - match: { _type: "doc" }
  - match: { _type: "_doc" }
  - match: { _id: "1" }
  - is_false: found

@@ -0,0 +1,41 @@
"Term vector API should return 'found: false' for docs between index and refresh":
  - do:
      indices.create:
        index: testidx
        body:
          settings:
            index:
              translog.flush_threshold_size: "512MB"
              number_of_shards: 1
              number_of_replicas: 0
              refresh_interval: -1
          mappings:
            doc:
              properties:
                text:
                  type : "text"
                  term_vector : "with_positions_offsets"

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      index:
        index: testidx
        type: doc
        id: 1
        body:
          text : "foo bar"

  - do:
      termvectors:
        index: testidx
        type: doc
        id: 1
        realtime: false

  - match: { _index: "testidx" }
  - match: { _type: "doc" }
  - match: { _id: "1" }
  - is_false: found
@@ -1,3 +1,8 @@
setup:
  - skip:
      version: " - 6.99.99"
      reason: types are required in requests before 7.0.0

---
"Realtime Term Vectors":

@@ -17,14 +22,13 @@
  - do:
      index:
        index: test_1
        type: test
        type: _doc
        id: 1
        body: { foo: bar }

  - do:
      termvectors:
        index: test_1
        type: test
        id: 1
        realtime: false

@@ -33,7 +37,6 @@
  - do:
      termvectors:
        index: test_1
        type: test
        id: 1
        realtime: true

@@ -0,0 +1,40 @@
---
"Realtime Term Vectors":

  - do:
      indices.create:
        index: test_1
        body:
          settings:
            index:
              refresh_interval: -1
              number_of_replicas: 0

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      index:
        index: test_1
        type: test
        id: 1
        body: { foo: bar }

  - do:
      termvectors:
        index: test_1
        type: test
        id: 1
        realtime: false

  - is_false: found

  - do:
      termvectors:
        index: test_1
        type: test
        id: 1
        realtime: true

  - is_true: found
@@ -1,88 +0,0 @@
---
"Versions":

  - do:
      index:
        index: test_1
        type: test
        id: 1
        body: { foo: bar }
  - match: { _version: 1}

  - do:
      index:
        index: test_1
        type: test
        id: 1
        body: { foo: bar }
  - match: { _version: 2}

  - do:
      get:
        index: test_1
        type: test
        id: 1
        version: 2
  - match: { _id: "1" }

  - do:
      catch: conflict
      get:
        index: test_1
        type: test
        id: 1
        version: 1

  - do:
      get:
        index: test_1
        type: test
        id: 1
        version: 2
        version_type: external
  - match: { _id: "1" }

  - do:
      catch: conflict
      get:
        index: test_1
        type: test
        id: 1
        version: 10
        version_type: external

  - do:
      catch: conflict
      get:
        index: test_1
        type: test
        id: 1
        version: 1
        version_type: external

  - do:
      get:
        index: test_1
        type: test
        id: 1
        version: 2
        version_type: external_gte
  - match: { _id: "1" }

  - do:
      catch: conflict
      get:
        index: test_1
        type: test
        id: 1
        version: 10
        version_type: external_gte

  - do:
      catch: conflict
      get:
        index: test_1
        type: test
        id: 1
        version: 1
        version_type: external_gte
@ -322,4 +322,6 @@ if (isEclipse == false || project.path == ":server-tests") {
|
|||
dependsOn: test.dependsOn) {
|
||||
include '**/*IT.class'
|
||||
}
|
||||
check.dependsOn integTest
|
||||
integTest.mustRunAfter test
|
||||
}
|
||||
|
|
|
@@ -0,0 +1 @@
c93cb302ae55231fa31df9f0261218c7c28220f9
@@ -1 +0,0 @@
ada03def6399ef5606a77c93ee45514701b98987
@@ -0,0 +1 @@
472592ca23575c81e4d9afb5a96a4cd0011a1eec
@@ -1 +0,0 @@
c21b7cb3d2a3f34ea73b915cc15c67f203876ddf
@@ -0,0 +1 @@
4367ce6eeceb1aefd34909dd54f37e5ba1d19155
@@ -1 +0,0 @@
a6149ea94d695ebad4e5037f2926ca20c768777d
@@ -0,0 +1 @@
da31530f4b6e6af93d0bc48b5340f807dee4a674
@@ -1 +0,0 @@
88de707d0913f9240114091a22bc178627792de3
@@ -0,0 +1 @@
a518c74fed29756129bd9d269a4e91608dc1f18a
@@ -1 +0,0 @@
9812a19bdccd3646fde3db3ed53ce17c8ecd2c72
@@ -0,0 +1 @@
373ecf6b3d3327d39447fe5cbea4a7d39da45c96
@@ -1 +0,0 @@
9877d38f3f966352812888014b9dd0fcd861b418
@@ -0,0 +1 @@
6d06db3ebf77daf78d2e7bcddbc88939e3e4f209
@@ -1 +0,0 @@
2ae87d38ad6b9f349de1a14c9fa2bc36d1e1126e
@@ -0,0 +1 @@
69daf0820f3765f09cd6ac58b0bd735cd715b36d
@@ -1 +0,0 @@
cb167b153ee422e222b314fb1aacf07742079b18
@@ -0,0 +1 @@
ec1f69fab1272640930e837c52dd7e7ece9eac02
@@ -1 +0,0 @@
5461afee0210ce1d2e9336e0a3f94ea7da64e491
@@ -0,0 +1 @@
391c592312e63b9b3a524aaa7cd332fbd835a2d4
@@ -1 +0,0 @@
28fd369ca80e1bee4a9830723348363850f25f91
@@ -0,0 +1 @@
dbf82d3776adb6eedf79758b6f190dee177a4a48
@@ -1 +0,0 @@
7139424ecadad80df8127497f06d08d037c5e9cd
@@ -0,0 +1 @@
4be46166d77909b2cd77f4fcd799a6aa27653525
@@ -1 +0,0 @@
82f9b91f2e288af0b9cee8ccc561655f9d07ed70
@@ -0,0 +1 @@
c59ab3fead635dc8072b5baa8324f8449b66043d
@@ -1 +0,0 @@
add9ee3e5c59c0544c3c88a4c92695a630a20693
@@ -0,0 +1 @@
06b279804b2acf70b1261f5df2dfc0a5fb316654
@@ -1 +0,0 @@
1b926af192edb666840bf23cfb2d8e72fc7373e7
@@ -0,0 +1 @@
283aa39e821e030604efa261c414b78ddfa662d1
@@ -1 +0,0 @@
e1926831397ff98ac0c68b3632b5d3365ee5062b
@@ -1,48 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * A Simple class to handle wrapping a response with another response
 */
package org.elasticsearch.action.support;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;

public abstract class DelegatingActionListener<Instigator extends ActionResponse, Delegated extends ActionResponse>
        implements ActionListener<Instigator> {

    ActionListener<Delegated> delegatedActionListener;

    protected DelegatingActionListener(final ActionListener<Delegated> listener) {
        this.delegatedActionListener = listener;
    }

    protected abstract Delegated getDelegatedFromInstigator(Instigator response);

    @Override
    public final void onResponse(Instigator response) {
        delegatedActionListener.onResponse(getDelegatedFromInstigator(response));
    }

    @Override
    public final void onFailure(Exception e) {
        delegatedActionListener.onFailure(e);
    }
}
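The removed class above is small and its header comment is terse, so here is a minimal, self-contained sketch of the same listener-delegation pattern for readers who have not seen it before. It assumes Java 17; Listener, FetchResponse, SummaryResponse, and DelegatingListener are illustrative stand-ins, not types from this codebase, and the sketch is not the removed implementation itself.

import java.util.function.Function;

// Stand-in for org.elasticsearch.action.ActionListener, kept local so the sketch compiles on its own.
interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

// Hypothetical response types used only for illustration.
record FetchResponse(String payload) {}
record SummaryResponse(int length) {}

// The same delegation shape as the removed DelegatingActionListener: receive one response type,
// convert it, and forward the converted value (or any failure) to the wrapped listener.
final class DelegatingListener<I, D> implements Listener<I> {
    private final Listener<D> delegate;
    private final Function<I, D> converter;

    DelegatingListener(Listener<D> delegate, Function<I, D> converter) {
        this.delegate = delegate;
        this.converter = converter;
    }

    @Override
    public void onResponse(I response) {
        delegate.onResponse(converter.apply(response));
    }

    @Override
    public void onFailure(Exception e) {
        delegate.onFailure(e);
    }
}

public class DelegationSketch {
    public static void main(String[] args) {
        Listener<SummaryResponse> summaryListener = new Listener<>() {
            @Override
            public void onResponse(SummaryResponse response) {
                System.out.println("summary length = " + response.length());
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        };
        // Wrap the summary listener so callers that produce a FetchResponse can still notify it.
        Listener<FetchResponse> fetchListener =
                new DelegatingListener<>(summaryListener, fetch -> new SummaryResponse(fetch.payload().length()));
        fetchListener.onResponse(new FetchResponse("The quick brown fox is brown."));
    }
}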
@@ -62,7 +62,10 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable

        /**
         * The type of the action.
         *
         * @deprecated Types are in the process of being removed.
         */
        @Deprecated
        public String getType() {
            return type;
        }

@@ -19,6 +19,7 @@

package org.elasticsearch.action.termvectors;

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;

@@ -32,6 +33,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -39,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.action.document.RestTermVectorsAction;

import java.io.IOException;
import java.util.ArrayList;

@@ -60,6 +63,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 * required.
 */
public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements RealtimeRequest {
    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(
        LogManager.getLogger(TermVectorsRequest.class));

    private static final ParseField INDEX = new ParseField("_index");
    private static final ParseField TYPE = new ParseField("_type");

@@ -621,6 +626,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
                    termVectorsRequest.index = parser.text();
                } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
                    termVectorsRequest.type = parser.text();
                    deprecationLogger.deprecated(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE);
                } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) {
                    if (termVectorsRequest.doc != null) {
                        throw new ElasticsearchParseException("failed to parse term vectors request. " +
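The last hunk keeps `_type` parseable but routes every use through the deprecation logger. As a rough, self-contained illustration of that parse-and-warn pattern (not the Elasticsearch parser or DeprecationLogger API itself), the sketch below walks a request-body map and flags the legacy field; the field names, the warning text, and the warn callback are all stand-ins for illustration.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Consumer;

public class DeprecatedFieldParsingSketch {

    // Illustrative warning text, not the actual TYPES_DEPRECATION_MESSAGE constant.
    static final String TYPES_DEPRECATION_MESSAGE =
        "[types removal] Specifying types in term vector requests is deprecated.";

    // Walk a flat "body" and honour legacy fields while emitting a deprecation for each use.
    static void parse(Map<String, String> body, Consumer<String> warn) {
        for (Map.Entry<String, String> field : body.entrySet()) {
            switch (field.getKey()) {
                case "_index":
                    System.out.println("index = " + field.getValue());
                    break;
                case "_type":
                    // still accepted, but flagged on every use
                    warn.accept(TYPES_DEPRECATION_MESSAGE);
                    System.out.println("type = " + field.getValue());
                    break;
                case "_id":
                    System.out.println("id = " + field.getValue());
                    break;
                default:
                    throw new IllegalArgumentException("unknown field [" + field.getKey() + "]");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> body = new LinkedHashMap<>();
        body.put("_index", "testidx");
        body.put("_type", "testtype");
        body.put("_id", "testing_document");
        parse(body, message -> System.err.println("DEPRECATION: " + message));
    }
}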