Merge remote-tracking branch 'es/7.x' into enrich-7.x
commit b66ad34565

@@ -24,7 +24,7 @@ mainClassName = 'org.openjdk.jmh.Main'
assemble.enabled = false
archivesBaseName = 'elasticsearch-benchmarks'

unitTest.enabled = false
test.enabled = false

dependencies {
    compile("org.elasticsearch:elasticsearch:${version}") {

@@ -199,11 +199,11 @@ if (project != rootProject) {
    into localDownloads
}

unitTest {
test {
    // The test task is configured to runtimeJava version, but build-tools doesn't support all of them, so test
    // with compiler instead on the ones that are too old.
    if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_10) {
        jvm = "${project.compilerJavaHome}/bin/java"
        executable = "${project.compilerJavaHome}/bin/java"
    }
}

@@ -215,8 +215,6 @@ if (project != rootProject) {
}
dependsOn setupLocalDownloads
exclude "**/*Tests.class"
testClassesDirs = sourceSets.test.output.classesDirs
classpath = sourceSets.test.runtimeClasspath
inputs.dir(file("src/testKit"))
// tell BuildExamplePluginsIT where to find the example plugins
systemProperty (

@@ -232,11 +230,7 @@ if (project != rootProject) {
if (isLuceneSnapshot) {
    systemProperty 'test.lucene-snapshot-revision', isLuceneSnapshot[0][1]
}
String defaultParallel = System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel)
if (defaultParallel == "auto") {
    defaultParallel = Math.max(Runtime.getRuntime().availableProcessors(), 4)
}
maxParallelForks defaultParallel as Integer
maxParallelForks System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel.toString()) as Integer
}
check.dependsOn(integTest)
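
Note on the parallelism change above: the old wiring resolved the special value "auto" by hand before handing it to maxParallelForks, while after this commit project.rootProject.ext.defaultParallel is already numeric (findDefaultParallel now returns an int, see the BuildPlugin hunks below), so the task only needs to honor a -Dtests.jvms override. A minimal standalone sketch of the resulting behavior, not code taken from the commit:

    // -Dtests.jvms=N wins; otherwise use the default the build derived from the hardware
    int forks = System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel.toString()) as Integer
    project.tasks.getByName('integTest') {
        maxParallelForks = forks
    }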

@@ -1,53 +0,0 @@
package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.SuiteBalancer
import com.carrotsearch.ant.tasks.junit4.balancers.ExecutionTimeBalancer
import com.carrotsearch.ant.tasks.junit4.listeners.ExecutionTimesReport
import org.apache.tools.ant.types.FileSet

class BalancersConfiguration {
    // parent task, so executionTime can register an additional listener
    RandomizedTestingTask task
    List<SuiteBalancer> balancers = new ArrayList<>()

    void executionTime(Map<String,Object> properties) {
        ExecutionTimeBalancer balancer = new ExecutionTimeBalancer()

        FileSet fileSet = new FileSet()
        Object filename = properties.remove('cacheFilename')
        if (filename == null) {
            throw new IllegalArgumentException('cacheFilename is required for executionTime balancer')
        }
        fileSet.setIncludes(filename.toString())

        File cacheDir = task.project.projectDir
        Object dir = properties.remove('cacheDir')
        if (dir != null) {
            cacheDir = new File(dir.toString())
        }
        fileSet.setDir(cacheDir)
        balancer.add(fileSet)

        int historySize = 10
        Object size = properties.remove('historySize')
        if (size instanceof Integer) {
            historySize = (Integer)size
        } else if (size != null) {
            throw new IllegalArgumentException('historySize must be an integer')
        }
        ExecutionTimesReport listener = new ExecutionTimesReport()
        listener.setFile(new File(cacheDir, filename.toString()))
        listener.setHistoryLength(historySize)

        if (properties.isEmpty() == false) {
            throw new IllegalArgumentException('Unknown properties for executionTime balancer: ' + properties.keySet())
        }

        task.listenersConfig.listeners.add(listener)
        balancers.add(balancer)
    }

    void custom(SuiteBalancer balancer) {
        balancers.add(balancer)
    }
}

@@ -1,25 +0,0 @@
package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.carrotsearch.ant.tasks.junit4.listeners.antxml.AntXmlReport


class ListenersConfiguration {
    RandomizedTestingTask task
    List<AggregatedEventListener> listeners = new ArrayList<>()

    void junitReport(Map<String, Object> props) {
        AntXmlReport reportListener = new AntXmlReport()
        Object dir = props == null ? null : props.get('dir')
        if (dir != null) {
            reportListener.setDir(task.project.file(dir))
        } else {
            reportListener.setDir(new File(task.project.buildDir, 'reports' + File.separator + "${task.name}Junit"))
        }
        listeners.add(reportListener)
    }

    void custom(AggregatedEventListener listener) {
        listeners.add(listener)
    }
}

@@ -1,60 +0,0 @@
package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.JUnit4
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.tasks.TaskContainer

class RandomizedTestingPlugin implements Plugin<Project> {

    void apply(Project project) {
        String seed = setupSeed(project)
        createUnitTestTask(project.tasks)
        configureAnt(project.ant, seed)
    }

    /**
     * Pins the test seed at configuration time so it isn't different on every
     * {@link RandomizedTestingTask} execution. This is useful if random
     * decisions in one run of {@linkplain RandomizedTestingTask} influence the
     * outcome of subsequent runs. Pinning the seed up front like this makes
     * the reproduction line from one run be useful on another run.
     */
    static String setupSeed(Project project) {
        if (project.rootProject.ext.has('testSeed')) {
            /* Skip this if we've already pinned the testSeed. It is important
             * that this checks the rootProject so that we know we've only ever
             * initialized one time. */
            return project.rootProject.ext.testSeed
        }
        String testSeed = System.getProperty('tests.seed')
        if (testSeed == null) {
            long seed = new Random(System.currentTimeMillis()).nextLong()
            testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT)
        }
        /* Set the testSeed on the root project first so other projects can use
         * it during initialization. */
        project.rootProject.ext.testSeed = testSeed
        project.rootProject.subprojects {
            project.ext.testSeed = testSeed
        }

        return testSeed
    }

    static void createUnitTestTask(TaskContainer tasks) {
        // only create a unitTest task if the `test` task exists as some project don't make use of it.
        tasks.matching { it.name == "test" }.all {
            // We don't want to run any tests with the Gradle test runner since we add our own randomized runner
            it.enabled = false
            RandomizedTestingTask unitTest = tasks.create('unitTest', RandomizedTestingTask)
            unitTest.description = 'Runs unit tests with the randomized testing framework'
            it.dependsOn unitTest
        }
    }

    static void configureAnt(AntBuilder ant, String seed) {
        ant.project.addTaskDefinition('junit4:junit4', JUnit4.class)
        ant.properties.put('tests.seed', seed)
    }
}
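
The seed pinning removed here does not go away: the same setupSeed logic reappears in BuildPlugin later in this diff. The idea is that one seed is fixed per build invocation and handed to every test JVM, so the reproduction line from a failed run replays deterministically. A hedged consumer-side sketch (the seed value below is a placeholder, not from the commit):

    // every forked test JVM sees the same pinned seed, so a failure reported with
    //   -Dtests.seed=DEADBEEF   (placeholder value)
    // can be replayed by passing that property to the next invocation
    project.tasks.withType(org.gradle.api.tasks.testing.Test) {
        systemProperty 'tests.seed', project.rootProject.ext.testSeed
    }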

@@ -1,330 +0,0 @@
package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.ListenersList
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import groovy.xml.NamespaceBuilder
import groovy.xml.NamespaceBuilderSupport
import org.apache.tools.ant.BuildException
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.Project
import org.apache.tools.ant.RuntimeConfigurable
import org.apache.tools.ant.UnknownElement
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.DefaultTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.FileCollection
import org.gradle.api.file.FileTreeElement
import org.gradle.api.specs.Spec
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory
import org.gradle.api.tasks.Optional
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.options.Option
import org.gradle.api.tasks.util.PatternFilterable
import org.gradle.api.tasks.util.PatternSet
import org.gradle.internal.logging.progress.ProgressLoggerFactory
import org.gradle.util.ConfigureUtil

import javax.inject.Inject

class RandomizedTestingTask extends DefaultTask {

    // TODO: change to "executable" to match gradle test params?
    @Optional
    @Input
    String jvm = 'java'

    @Optional
    @Input
    File workingDir = new File(project.buildDir, 'testrun' + File.separator + name)

    @Optional
    @Input
    FileCollection classpath

    @Input
    String parallelism = '1'

    @Input
    FileCollection testClassesDirs

    @Optional
    @Input
    boolean haltOnFailure = true

    @Optional
    @Input
    boolean shuffleOnSlave = true

    @Optional
    @Input
    boolean enableAssertions = true

    @Optional
    @Input
    boolean enableSystemAssertions = true

    @Optional
    @Input
    boolean leaveTemporary = false

    @Optional
    @Input
    String ifNoTests = 'ignore'

    @Optional
    @Input
    String onNonEmptyWorkDirectory = 'fail'

    TestLoggingConfiguration testLoggingConfig = new TestLoggingConfiguration()

    BalancersConfiguration balancersConfig = new BalancersConfiguration(task: this)
    ListenersConfiguration listenersConfig = new ListenersConfiguration(task: this)

    List<String> jvmArgs = new ArrayList<>()

    @Optional
    @Input
    String argLine = null

    Map<String, Object> systemProperties = new HashMap<>()
    Map<String, Object> environmentVariables = new HashMap<>()
    PatternFilterable patternSet = new PatternSet()

    RandomizedTestingTask() {
        outputs.upToDateWhen {false} // randomized tests are never up to date
        listenersConfig.listeners.add(new TestProgressLogger(factory: getProgressLoggerFactory()))
        listenersConfig.listeners.add(new TestReportLogger(logger: logger, config: testLoggingConfig))
    }

    @Inject
    ProgressLoggerFactory getProgressLoggerFactory() {
        throw new UnsupportedOperationException()
    }

    void jvmArgs(Iterable<String> arguments) {
        jvmArgs.addAll(arguments)
    }

    void jvmArg(String argument) {
        jvmArgs.add(argument)
    }

    void systemProperty(String property, Object value) {
        systemProperties.put(property, value)
    }

    void environment(String key, Object value) {
        environmentVariables.put(key, value)
    }

    void include(String... includes) {
        this.patternSet.include(includes);
    }

    void include(Iterable<String> includes) {
        this.patternSet.include(includes);
    }

    void include(Spec<FileTreeElement> includeSpec) {
        this.patternSet.include(includeSpec);
    }

    void include(Closure includeSpec) {
        this.patternSet.include(includeSpec);
    }

    void exclude(String... excludes) {
        this.patternSet.exclude(excludes);
    }

    void exclude(Iterable<String> excludes) {
        this.patternSet.exclude(excludes);
    }

    void exclude(Spec<FileTreeElement> excludeSpec) {
        this.patternSet.exclude(excludeSpec);
    }

    void exclude(Closure excludeSpec) {
        this.patternSet.exclude(excludeSpec);
    }

    @Input
    void testLogging(Closure closure) {
        ConfigureUtil.configure(closure, testLoggingConfig)
    }

    @Input
    void balancers(Closure closure) {
        ConfigureUtil.configure(closure, balancersConfig)
    }

    @Input
    void listeners(Closure closure) {
        ConfigureUtil.configure(closure, listenersConfig)
    }

    @Option(
        option = "tests",
        description = "Sets test class or method name to be included. This is for IDEs. Use -Dtests.class and -Dtests.method"
    )
    void setTestNameIncludePattern(String testNamePattern) {
        // This is only implemented to give support for IDEs running tests. There are 3 patterns expected:
        // * An exact test class and method
        // * An exact test class
        // * A package name prefix, ending with .*
        // There is no way to distinguish the first two without looking at classes, so we use the rule
        // that class names start with an uppercase letter...
        // TODO: this doesn't work yet, but not sure why...intellij says it is using --tests, and this work from the command line...
        String[] parts = testNamePattern.split('\\.')
        String lastPart = parts[parts.length - 1]
        String classname
        String methodname = null
        if (lastPart.equals('*') || lastPart.charAt(0).isUpperCase()) {
            // package name or class name, just pass through
            classname = testNamePattern
        } else {
            // method name, need to separate
            methodname = lastPart
            classname = testNamePattern.substring(0, testNamePattern.length() - lastPart.length() - 1)
        }
        ant.setProperty('tests.class', classname)
        if (methodname != null) {
            ant.setProperty('tests.method', methodname)
        }
    }

    @TaskAction
    void executeTests() {
        Map attributes = [
            jvm: jvm,
            parallelism: parallelism,
            heartbeat: testLoggingConfig.slowTests.heartbeat,
            dir: workingDir,
            tempdir: new File(workingDir, 'temp'),
            haltOnFailure: true, // we want to capture when a build failed, but will decide whether to rethrow later
            shuffleOnSlave: shuffleOnSlave,
            leaveTemporary: leaveTemporary,
            ifNoTests: ifNoTests,
            onNonEmptyWorkDirectory: onNonEmptyWorkDirectory,
            newenvironment: true
        ]

        DefaultLogger listener = null
        ByteArrayOutputStream antLoggingBuffer = null
        if (logger.isInfoEnabled() == false) {
            // in info logging, ant already outputs info level, so we see everything
            // but on errors or when debugging, we want to see info level messages
            // because junit4 emits jvm output with ant logging
            if (testLoggingConfig.outputMode == TestLoggingConfiguration.OutputMode.ALWAYS) {
                // we want all output, so just stream directly
                listener = new DefaultLogger(
                    errorPrintStream: System.err,
                    outputPrintStream: System.out,
                    messageOutputLevel: Project.MSG_INFO)
            } else {
                // we want to buffer the info, and emit it if the test fails
                antLoggingBuffer = new ByteArrayOutputStream()
                PrintStream stream = new PrintStream(antLoggingBuffer, true, "UTF-8")
                listener = new DefaultLogger(
                    errorPrintStream: stream,
                    outputPrintStream: stream,
                    messageOutputLevel: Project.MSG_INFO)
            }
            project.ant.project.addBuildListener(listener)
        }

        NamespaceBuilderSupport junit4 = NamespaceBuilder.newInstance(ant, 'junit4')
        try {
            junit4.junit4(attributes) {
                classpath {
                    pathElement(path: classpath.asPath)
                }
                if (enableAssertions) {
                    jvmarg(value: '-ea')
                }
                if (enableSystemAssertions) {
                    jvmarg(value: '-esa')
                }
                for (String arg : jvmArgs) {
                    jvmarg(value: arg)
                }
                if (argLine != null) {
                    jvmarg(line: argLine)
                }
                testClassesDirs.each { testClassDir ->
                    fileset(dir: testClassDir) {
                        patternSet.getIncludes().each { include(name: it) }
                        patternSet.getExcludes().each { exclude(name: it) }
                    }
                }
                for (Map.Entry<String, Object> prop : systemProperties) {
                    if (prop.getKey().equals('tests.seed')) {
                        throw new InvalidUserDataException('Seed should be ' +
                            'set on the project instead of a system property')
                    }
                    if (prop.getValue() instanceof Closure) {
                        sysproperty key: prop.getKey(), value: (prop.getValue() as Closure).call().toString()
                    } else {
                        sysproperty key: prop.getKey(), value: prop.getValue().toString()
                    }
                }
                systemProperty 'tests.seed', project.testSeed
                for (Map.Entry<String, Object> envvar : environmentVariables) {
                    env key: envvar.getKey(), value: envvar.getValue().toString()
                }
                makeListeners()
            }
        } catch (BuildException e) {
            if (antLoggingBuffer != null) {
                logger.error('JUnit4 test failed, ant output was:')
                logger.error(antLoggingBuffer.toString('UTF-8'))
            }
            if (haltOnFailure) {
                throw e;
            }
        }

        if (listener != null) {
            // remove the listener we added so other ant tasks dont have verbose logging!
            project.ant.project.removeBuildListener(listener)
        }
    }

    static class ListenersElement extends UnknownElement {
        AggregatedEventListener[] listeners

        ListenersElement() {
            super('listeners')
            setNamespace('junit4')
            setQName('listeners')
        }

        public void handleChildren(Object realThing, RuntimeConfigurable wrapper) {
            assert realThing instanceof ListenersList
            ListenersList list = (ListenersList)realThing

            for (AggregatedEventListener listener : listeners) {
                list.addConfigured(listener)
            }
        }
    }

    /**
     * Makes an ant xml element for 'listeners' just as AntBuilder would, except configuring
     * the element adds the already created children.
     */
    def makeListeners() {
        def context = ant.getAntXmlContext()
        def parentWrapper = context.currentWrapper()
        def parent = parentWrapper.getProxy()
        UnknownElement element = new ListenersElement(listeners: listenersConfig.listeners)
        element.setProject(context.getProject())
        element.setRealThing(logger)
        ((UnknownElement)parent).addChild(element)
        RuntimeConfigurable wrapper = new RuntimeConfigurable(element, element.getQName())
        parentWrapper.addChild(wrapper)
        return wrapper.getProxy()
    }
}
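
With this file gone, the ant/JUnit4 bridge is retired and Gradle's built-in Test task takes over. As rough orientation, here is how the removed task's main knobs map onto stock Test properties; this is a sketch with placeholder values, not code from the commit:

    project.tasks.withType(org.gradle.api.tasks.testing.Test) {
        executable = "${project.runtimeJavaHome}/bin/java"             // was: jvm
        maxParallelForks = 4                                           // was: parallelism (placeholder count)
        ignoreFailures = false                                         // was: haltOnFailure = true
        enableAssertions = true                                        // was: enableAssertions / jvmarg '-ea'
        include '**/*Tests.class'                                      // was: patternSet includes
        systemProperty 'tests.seed', project.rootProject.ext.testSeed  // was: set via ant properties
    }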

@@ -1,14 +0,0 @@
package com.carrotsearch.gradle.junit4

class SlowTestsConfiguration {
    int heartbeat = 0
    int summarySize = 0

    void heartbeat(int heartbeat) {
        this.heartbeat = heartbeat
    }

    void summarySize(int summarySize) {
        this.summarySize = summarySize
    }
}

@@ -1,14 +0,0 @@
package com.carrotsearch.gradle.junit4

class StackTraceFiltersConfiguration {
    List<String> patterns = new ArrayList<>()
    List<String> contains = new ArrayList<>()

    void regex(String pattern) {
        patterns.add(pattern)
    }

    void contains(String contain) {
        contains.add(contain)
    }
}

@@ -1,43 +0,0 @@
package com.carrotsearch.gradle.junit4

import org.gradle.api.tasks.Input
import org.gradle.util.ConfigureUtil

class TestLoggingConfiguration {
    /** Display mode for output streams. */
    static enum OutputMode {
        /** Always display the output emitted from tests. */
        ALWAYS,
        /**
         * Display the output only if a test/ suite failed. This requires internal buffering
         * so the output will be shown only after a test completes.
         */
        ONERROR,
        /** Don't display the output, even on test failures. */
        NEVER
    }

    OutputMode outputMode = OutputMode.ONERROR
    SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
    StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()

    /** Summarize the first N failures at the end of the test. */
    @Input
    int showNumFailuresAtEnd = 3 // match TextReport default

    void slowTests(Closure closure) {
        ConfigureUtil.configure(closure, slowTests)
    }

    void stackTraceFilters(Closure closure) {
        ConfigureUtil.configure(closure, stackTraceFilters)
    }

    void outputMode(String mode) {
        outputMode = mode.toUpperCase() as OutputMode
    }

    void showNumFailuresAtEnd(int n) {
        showNumFailuresAtEnd = n
    }
}
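
The three OutputMode values correspond roughly to settings on Gradle's own testLogging container, which the new BuildPlugin code later in this diff uses. A hedged approximation, not part of the commit:

    project.tasks.withType(org.gradle.api.tasks.testing.Test) {
        testLogging {
            // OutputMode.ALWAYS would be roughly: showStandardStreams = true
            // OutputMode.ONERROR: keep streams quiet but surface failures in full
            showExceptions = true
            showCauses = true
            exceptionFormat = 'full'
        }
    }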

@@ -1,193 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.JUnit4
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe
import com.carrotsearch.ant.tasks.junit4.events.TestStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap
import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import org.gradle.internal.logging.progress.ProgressLogger
import org.gradle.internal.logging.progress.ProgressLoggerFactory
import org.junit.runner.Description

import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.FAILURE
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED_ASSUMPTION
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.OK

/**
 * Adapts junit4's event listeners into gradle's ProgressLogger. Note that
 * junit4 guarantees (via guava) that methods on this class won't be called by
 * multiple threads simultaneously which is helpful in making it simpler.
 *
 * Every time a test finishes this class will update the logger. It will log
 * the last finished test method on the logger line until the first suite
 * finishes. Once the first suite finishes it always logs the last finished
 * suite. This means that in test runs with a single suite the logger will be
 * updated with the test name the whole time which is useful because these runs
 * usually have longer individual tests. For test runs with lots of suites the
 * majority of the time is spent showing the last suite that finished which is
 * more useful for those test runs because test methods there tend to be very
 * quick.
 */
class TestProgressLogger implements AggregatedEventListener {
    /** Factory to build a progress logger when testing starts */
    ProgressLoggerFactory factory
    ProgressLogger parentProgressLogger
    ProgressLogger suiteLogger
    ProgressLogger testLogger
    ProgressLogger[] slaveLoggers
    int totalSuites
    int totalSlaves

    // Counters incremented test completion.
    volatile int suitesCompleted = 0
    volatile int testsCompleted = 0
    volatile int testsFailed = 0
    volatile int testsIgnored = 0

    @Subscribe
    void onStart(AggregatedStartEvent e) throws IOException {
        totalSuites = e.suiteCount
        totalSlaves = e.slaveCount
        parentProgressLogger = factory.newOperation(TestProgressLogger)
        parentProgressLogger.setDescription('Randomized test runner')
        parentProgressLogger.started()

        suiteLogger = factory.newOperation(TestProgressLogger, parentProgressLogger)
        suiteLogger.setDescription('Suite logger')
        suiteLogger.started("Suites: 0/" + totalSuites)
        testLogger = factory.newOperation(TestProgressLogger, parentProgressLogger)
        testLogger.setDescription('Test logger')
        testLogger.started('Tests: completed: 0, failed: 0, ignored: 0')
        slaveLoggers = new ProgressLogger[e.slaveCount]
        for (int i = 0; i < e.slaveCount; ++i) {
            slaveLoggers[i] = factory.newOperation(TestProgressLogger, parentProgressLogger)
            slaveLoggers[i].setDescription("J${i} test logger")
            slaveLoggers[i].started("J${i}: initializing...")
        }
    }

    @Subscribe
    void onChildBootstrap(ChildBootstrap e) throws IOException {
        slaveLoggers[e.getSlave().id].progress("J${e.slave.id}: starting (pid ${e.slave.pidString})")
    }

    @Subscribe
    void onQuit(AggregatedQuitEvent e) throws IOException {
        // if onStart was never called (eg no matching tests), suiteLogger and all the other loggers will be null
        if (suiteLogger != null) {
            suiteLogger.completed()
            testLogger.completed()
            for (ProgressLogger slaveLogger : slaveLoggers) {
                slaveLogger.completed()
            }
            parentProgressLogger.completed()
        }
    }

    @Subscribe
    void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException {
        String suiteName = simpleName(e.suiteStartedEvent.description)
        slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${suiteName} - initializing")
    }

    @Subscribe
    void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException {
        suitesCompleted++
        suiteLogger.progress("Suites: " + suitesCompleted + "/" + totalSuites)
    }

    @Subscribe
    void onTestResult(AggregatedTestResultEvent e) throws IOException {
        String statusMessage
        testsCompleted++
        switch (e.status) {
            case ERROR:
            case FAILURE:
                testsFailed++
                statusMessage = "failed"
                break
            case IGNORED:
            case IGNORED_ASSUMPTION:
                testsIgnored++
                statusMessage = "ignored"
                break
            case OK:
                String time = formatDurationInSeconds(e.executionTime)
                statusMessage = "completed [${time}]"
                break
            default:
                throw new IllegalArgumentException("Unknown test status: [${e.status}]")
        }
        testLogger.progress("Tests: completed: ${testsCompleted}, failed: ${testsFailed}, ignored: ${testsIgnored}")
        String testName = testName(e.description)
        slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ${statusMessage}")
    }

    @Subscribe
    void onTestStarted(TestStartedEvent e) throws IOException {
        String testName = testName(e.description)
        slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ...")
    }

    @Subscribe
    void onHeartbeat(HeartBeatEvent e) throws IOException {
        String testName = testName(e.description)
        String time = formatDurationInSeconds(e.getNoEventDuration())
        slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} stalled for ${time}")
    }

    /**
     * Build the test name in the format of <className>.<methodName>
     */
    private static String testName(Description description) {
        String className = simpleName(description)
        if (description == null) {
            return className + "." + "<unknownMethod>"
        }
        return className + "." + description.methodName
    }

    /**
     * Extract a Class#getSimpleName style name from Class#getName style
     * string. We can't just use Class#getSimpleName because junit descriptions
     * don't always set the class field but they always set the className
     * field.
     */
    private static String simpleName(Description description) {
        if (description == null) {
            return "<unknownClass>"
        }
        return description.className.substring(description.className.lastIndexOf('.') + 1)
    }

    @Override
    void setOuter(JUnit4 junit) {}
}
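
Gradle's Test task exposes public hooks that cover the basic per-test progress this listener rendered on the console (the ProgressLogger API it used is internal). A minimal stock-Gradle sketch, illustrative only:

    project.tasks.withType(org.gradle.api.tasks.testing.Test) {
        beforeTest { descriptor ->
            logger.lifecycle("${descriptor.className}.${descriptor.name} ...")
        }
        afterTest { descriptor, result ->
            logger.lifecycle("${descriptor.className}.${descriptor.name} ${result.resultType}")
        }
    }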

@@ -1,369 +0,0 @@
package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.JUnit4
import com.carrotsearch.ant.tasks.junit4.Pluralize
import com.carrotsearch.ant.tasks.junit4.TestsSummaryEventListener
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.base.Strings
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe
import com.carrotsearch.ant.tasks.junit4.events.EventType
import com.carrotsearch.ant.tasks.junit4.events.IEvent
import com.carrotsearch.ant.tasks.junit4.events.IStreamEvent
import com.carrotsearch.ant.tasks.junit4.events.SuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.TestFinishedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap
import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.PartialOutputEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus
import com.carrotsearch.ant.tasks.junit4.events.mirrors.FailureMirror
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.carrotsearch.ant.tasks.junit4.listeners.StackTraceFilter
import org.apache.tools.ant.filters.TokenFilter
import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger
import org.junit.runner.Description

import java.util.concurrent.atomic.AtomicInteger

import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatTime
import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode

class TestReportLogger extends TestsSummaryEventListener implements AggregatedEventListener {

    static final String FAILURE_MARKER = " <<< FAILURES!"

    /** Status names column. */
    static EnumMap<? extends TestStatus, String> statusNames;
    static {
        statusNames = new EnumMap<>(TestStatus.class);
        for (TestStatus s : TestStatus.values()) {
            statusNames.put(s,
                s == TestStatus.IGNORED_ASSUMPTION
                    ? "IGNOR/A" : s.toString());
        }
    }

    JUnit4 owner

    /** Logger to write the report to */
    Logger logger

    TestLoggingConfiguration config

    /** Forked concurrent JVM count. */
    int forkedJvmCount

    /** Format line for JVM ID string. */
    String jvmIdFormat

    /** Output stream that logs messages to the given logger */
    LoggingOutputStream outStream
    LoggingOutputStream errStream

    /** A list of failed tests, if to be displayed at the end. */
    List<Description> failedTests = new ArrayList<>()

    /** Stack trace filters. */
    StackTraceFilter stackFilter = new StackTraceFilter()

    Map<String, Long> suiteTimes = new HashMap<>()
    boolean slowTestsFound = false

    int totalSuites
    AtomicInteger suitesCompleted = new AtomicInteger()

    @Subscribe
    void onStart(AggregatedStartEvent e) throws IOException {
        this.totalSuites = e.getSuiteCount();
        StringBuilder info = new StringBuilder('==> Test Info: ')
        info.append('seed=' + owner.getSeed() + '; ')
        info.append(Pluralize.pluralize(e.getSlaveCount(), 'jvm') + '=' + e.getSlaveCount() + '; ')
        info.append(Pluralize.pluralize(e.getSuiteCount(), 'suite') + '=' + e.getSuiteCount())
        logger.lifecycle(info.toString())

        forkedJvmCount = e.getSlaveCount();
        jvmIdFormat = " J%-" + (1 + (int) Math.floor(Math.log10(forkedJvmCount))) + "d";

        outStream = new LoggingOutputStream(logger: logger, level: LogLevel.LIFECYCLE, prefix: " 1> ")
        errStream = new LoggingOutputStream(logger: logger, level: LogLevel.ERROR, prefix: " 2> ")

        for (String contains : config.stackTraceFilters.contains) {
            TokenFilter.ContainsString containsFilter = new TokenFilter.ContainsString()
            containsFilter.setContains(contains)
            stackFilter.addContainsString(containsFilter)
        }
        for (String pattern : config.stackTraceFilters.patterns) {
            TokenFilter.ContainsRegex regexFilter = new TokenFilter.ContainsRegex()
            regexFilter.setPattern(pattern)
            stackFilter.addContainsRegex(regexFilter)
        }
    }

    @Subscribe
    void onChildBootstrap(ChildBootstrap e) throws IOException {
        logger.info("Started J" + e.getSlave().id + " PID(" + e.getSlave().getPidString() + ").");
    }

    @Subscribe
    void onHeartbeat(HeartBeatEvent e) throws IOException {
        logger.warn("HEARTBEAT J" + e.getSlave().id + " PID(" + e.getSlave().getPidString() + "): " +
            formatTime(e.getCurrentTime()) + ", stalled for " +
            formatDurationInSeconds(e.getNoEventDuration()) + " at: " +
            (e.getDescription() == null ? "<unknown>" : formatDescription(e.getDescription())))
        slowTestsFound = true
    }

    @Subscribe
    void onQuit(AggregatedQuitEvent e) throws IOException {
        if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
            List<Description> sublist = this.failedTests
            StringBuilder b = new StringBuilder()
            b.append('Tests with failures')
            if (sublist.size() > config.showNumFailuresAtEnd) {
                sublist = sublist.subList(0, config.showNumFailuresAtEnd)
                b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
            }
            b.append(':\n')
            for (Description description : sublist) {
                b.append(" - ").append(formatDescription(description, true)).append('\n')
            }
            logger.warn(b.toString())
        }
        if (config.slowTests.summarySize > 0) {
            List<Map.Entry<String, Long>> sortedSuiteTimes = new ArrayList<>(suiteTimes.entrySet())
            Collections.sort(sortedSuiteTimes, new Comparator<Map.Entry<String, Long>>() {
                @Override
                int compare(Map.Entry<String, Long> o1, Map.Entry<String, Long> o2) {
                    return o2.value - o1.value // sort descending
                }
            })
            LogLevel level = slowTestsFound ? LogLevel.WARN : LogLevel.INFO
            int numToLog = Math.min(config.slowTests.summarySize, sortedSuiteTimes.size())
            logger.log(level, 'Slow Tests Summary:')
            for (int i = 0; i < numToLog; ++i) {
                logger.log(level, String.format(Locale.ENGLISH, '%6.2fs | %s',
                    sortedSuiteTimes.get(i).value / 1000.0,
                    sortedSuiteTimes.get(i).key));
            }
            logger.log(level, '') // extra vertical separation
        }
        if (failedTests.isEmpty()) {
            // summary is already printed for failures
            logger.lifecycle('==> Test Summary: ' + getResult().toString())
        }
    }

    @Subscribe
    void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException {
        if (isPassthrough()) {
            SuiteStartedEvent evt = e.getSuiteStartedEvent();
            emitSuiteStart(LogLevel.LIFECYCLE, evt.getDescription());
        }
    }

    @Subscribe
    void onOutput(PartialOutputEvent e) throws IOException {
        if (isPassthrough()) {
            // We only allow passthrough output if there is one JVM.
            switch (e.getEvent().getType()) {
                case EventType.APPEND_STDERR:
                    ((IStreamEvent) e.getEvent()).copyTo(errStream);
                    break;
                case EventType.APPEND_STDOUT:
                    ((IStreamEvent) e.getEvent()).copyTo(outStream);
                    break;
                default:
                    break;
            }
        }
    }

    @Subscribe
    void onTestResult(AggregatedTestResultEvent e) throws IOException {
        if (isPassthrough() && e.getStatus() != TestStatus.OK) {
            flushOutput();
            emitStatusLine(LogLevel.ERROR, e, e.getStatus(), e.getExecutionTime());
        }

        if (!e.isSuccessful()) {
            failedTests.add(e.getDescription());
        }
    }

    @Subscribe
    void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException {
        final int completed = suitesCompleted.incrementAndGet();

        if (e.isSuccessful() && e.getTests().isEmpty()) {
            return;
        }
        if (config.slowTests.summarySize > 0) {
            suiteTimes.put(e.getDescription().getDisplayName(), e.getExecutionTime())
        }

        LogLevel level = e.isSuccessful() && config.outputMode != OutputMode.ALWAYS ? LogLevel.INFO : LogLevel.LIFECYCLE

        // We must emit buffered test and stream events (in case of failures).
        if (!isPassthrough()) {
            emitSuiteStart(level, e.getDescription())
            emitBufferedEvents(level, e)
        }

        // Emit a synthetic failure for suite-level errors, if any.
        if (!e.getFailures().isEmpty()) {
            emitStatusLine(level, e, TestStatus.ERROR, 0)
        }

        if (!e.getFailures().isEmpty()) {
            failedTests.add(e.getDescription())
        }

        emitSuiteEnd(level, e, completed)
    }

    /** Suite prologue. */
    void emitSuiteStart(LogLevel level, Description description) throws IOException {
        logger.log(level, 'Suite: ' + description.getDisplayName());
    }

    void emitBufferedEvents(LogLevel level, AggregatedSuiteResultEvent e) throws IOException {
        if (config.outputMode == OutputMode.NEVER) {
            return
        }

        final IdentityHashMap<TestFinishedEvent,AggregatedTestResultEvent> eventMap = new IdentityHashMap<>();
        for (AggregatedTestResultEvent tre : e.getTests()) {
            eventMap.put(tre.getTestFinishedEvent(), tre)
        }

        final boolean emitOutput = config.outputMode == OutputMode.ALWAYS && isPassthrough() == false ||
            config.outputMode == OutputMode.ONERROR && e.isSuccessful() == false

        for (IEvent event : e.getEventStream()) {
            switch (event.getType()) {
                case EventType.APPEND_STDOUT:
                    if (emitOutput) ((IStreamEvent) event).copyTo(outStream);
                    break;

                case EventType.APPEND_STDERR:
                    if (emitOutput) ((IStreamEvent) event).copyTo(errStream);
                    break;

                case EventType.TEST_FINISHED:
                    assert eventMap.containsKey(event)
                    final AggregatedTestResultEvent aggregated = eventMap.get(event);
                    if (aggregated.getStatus() != TestStatus.OK) {
                        flushOutput();
                        emitStatusLine(level, aggregated, aggregated.getStatus(), aggregated.getExecutionTime());
                    }

                default:
                    break;
            }
        }

        if (emitOutput) {
            flushOutput()
        }
    }

    void emitSuiteEnd(LogLevel level, AggregatedSuiteResultEvent e, int suitesCompleted) throws IOException {

        final StringBuilder b = new StringBuilder();
        b.append(String.format(Locale.ENGLISH, 'Completed [%d/%d]%s in %.2fs, ',
            suitesCompleted,
            totalSuites,
            e.getSlave().slaves > 1 ? ' on J' + e.getSlave().id : '',
            e.getExecutionTime() / 1000.0d));
        b.append(e.getTests().size()).append(Pluralize.pluralize(e.getTests().size(), ' test'));

        int failures = e.getFailureCount();
        if (failures > 0) {
            b.append(', ').append(failures).append(Pluralize.pluralize(failures, ' failure'));
        }

        int errors = e.getErrorCount();
        if (errors > 0) {
            b.append(', ').append(errors).append(Pluralize.pluralize(errors, ' error'));
        }

        int ignored = e.getIgnoredCount();
        if (ignored > 0) {
            b.append(', ').append(ignored).append(' skipped');
        }

        if (!e.isSuccessful()) {
            b.append(' <<< FAILURES!');
        }

        b.append('\n')
        logger.log(level, b.toString());
    }

    /** Emit status line for an aggregated event. */
    void emitStatusLine(LogLevel level, AggregatedResultEvent result, TestStatus status, long timeMillis) throws IOException {
        final StringBuilder line = new StringBuilder();

        line.append(Strings.padEnd(statusNames.get(status), 8, ' ' as char))
        line.append(formatDurationInSeconds(timeMillis))
        if (forkedJvmCount > 1) {
            line.append(String.format(Locale.ENGLISH, jvmIdFormat, result.getSlave().id))
        }
        line.append(' | ')

        line.append(formatDescription(result.getDescription()))
        if (!result.isSuccessful()) {
            line.append(FAILURE_MARKER)
        }
        logger.log(level, line.toString())

        PrintWriter writer = new PrintWriter(new LoggingOutputStream(logger: logger, level: level, prefix: ' > '))

        if (status == TestStatus.IGNORED && result instanceof AggregatedTestResultEvent) {
            writer.write('Cause: ')
            writer.write(((AggregatedTestResultEvent) result).getCauseForIgnored())
            writer.flush()
        }

        final List<FailureMirror> failures = result.getFailures();
        if (!failures.isEmpty()) {
            int count = 0;
            for (FailureMirror fm : failures) {
                count++;
                if (fm.isAssumptionViolation()) {
                    writer.write(String.format(Locale.ENGLISH,
                        'Assumption #%d: %s',
                        count, fm.getMessage() == null ? '(no message)' : fm.getMessage()));
                } else {
                    writer.write(String.format(Locale.ENGLISH,
                        'Throwable #%d: %s',
                        count,
                        stackFilter.apply(fm.getTrace())));
                }
            }
            writer.flush()
        }
    }

    void flushOutput() throws IOException {
        outStream.flush()
        errStream.flush()
    }

    /** Returns true if output should be logged immediately. */
    boolean isPassthrough() {
        return forkedJvmCount == 1 && config.outputMode == OutputMode.ALWAYS
    }

    @Override
    void setOuter(JUnit4 task) {
        owner = task
    }
}
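
The failure reporting this class handled moves to ErrorReportingTestListener and the TestFailureReportingPlugin introduced in BuildPlugin below. For orientation, a stock-Gradle approximation of the end-of-run "Tests with failures" summary; a sketch, not the listener the commit actually adds:

    def failed = []
    project.tasks.withType(org.gradle.api.tasks.testing.Test) { task ->
        task.afterTest { descriptor, result ->
            if (result.resultType == org.gradle.api.tasks.testing.TestResult.ResultType.FAILURE) {
                failed << "${descriptor.className}.${descriptor.name}"
            }
        }
        task.doLast {
            if (!failed.isEmpty()) {
                task.logger.lifecycle("\nTests with failures:")
                failed.each { task.logger.lifecycle("  - ${it}") }
            }
        }
    }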

@@ -18,13 +18,13 @@
 */
package org.elasticsearch.gradle

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import org.apache.commons.io.IOUtils
import org.apache.tools.ant.taskdefs.condition.Os
import org.eclipse.jgit.lib.Constants
import org.eclipse.jgit.lib.RepositoryBuilder
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.elasticsearch.gradle.test.ErrorReportingTestListener
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.JavaVersion

@@ -40,8 +40,8 @@ import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.credentials.HttpHeaderCredentials
import org.gradle.api.execution.TaskActionListener
import org.gradle.api.execution.TaskExecutionGraph
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.plugins.JavaPlugin
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin

@@ -51,6 +51,7 @@ import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.GroovyCompile
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.api.tasks.javadoc.Javadoc
import org.gradle.api.tasks.testing.Test
import org.gradle.authentication.http.HttpHeaderAuthentication
import org.gradle.internal.jvm.Jvm
import org.gradle.process.ExecResult

@@ -83,7 +84,6 @@ class BuildPlugin implements Plugin<Project> {
            )
        }
        project.pluginManager.apply('java')
        project.pluginManager.apply('carrotsearch.randomized-testing')
        configureConfigurations(project)
        configureJars(project) // jar config must be added before info broker
        // these plugins add lots of info to our jars

@@ -93,8 +93,12 @@ class BuildPlugin implements Plugin<Project> {
        project.pluginManager.apply('nebula.info-scm')
        project.pluginManager.apply('nebula.info-jar')

        // apply global test task failure listener
        project.rootProject.pluginManager.apply(TestFailureReportingPlugin)

        project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask)

        setupSeed(project)
        globalBuildInfo(project)
        configureRepositories(project)
        project.ext.versions = VersionProperties.versions

@@ -103,9 +107,7 @@ class BuildPlugin implements Plugin<Project> {
        configureJavadoc(project)
        configureSourcesJar(project)
        configurePomGeneration(project)

        applyCommonTestConfig(project)
        configureTest(project)
        configureTestTasks(project)
        configurePrecommit(project)
        configureDependenciesInfo(project)
    }

@@ -904,128 +906,107 @@ class BuildPlugin implements Plugin<Project> {
        }
    }

    static void applyCommonTestConfig(Project project) {
        project.tasks.withType(RandomizedTestingTask) {task ->
            jvm "${project.runtimeJavaHome}/bin/java"
            parallelism System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel)
            ifNoTests 'fail'
            onNonEmptyWorkDirectory 'wipe'
            leaveTemporary true
            project.sourceSets.matching { it.name == "test" }.all { test ->
                task.testClassesDirs = test.output.classesDirs
                task.classpath = test.runtimeClasspath
            }
            group = JavaBasePlugin.VERIFICATION_GROUP
            dependsOn 'testClasses'
    static void configureTestTasks(Project project) {
        // Default test task should run only unit tests
        project.tasks.withType(Test).matching { it.name == 'test' }.all {
            include '**/*Tests.class'
        }

        // Make sure all test tasks are configured properly
            if (name != "test") {
                project.tasks.matching { it.name == "test"}.all { testTask ->
                    task.shouldRunAfter testTask
                }
            }
            if (name == "unitTest") {
                include("**/*Tests.class")
            }

            // TODO: why are we not passing maxmemory to junit4?
            jvmArg '-Xmx' + System.getProperty('tests.heap.size', '512m')
            jvmArg '-Xms' + System.getProperty('tests.heap.size', '512m')
            jvmArg '-XX:+HeapDumpOnOutOfMemoryError'
        // none of this stuff is applicable to the `:buildSrc` project tests
        if (project.path != ':build-tools') {
            File heapdumpDir = new File(project.buildDir, 'heapdump')
            heapdumpDir.mkdirs()
            jvmArg '-XX:HeapDumpPath=' + heapdumpDir
            if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) {
                jvmArg '--illegal-access=warn'
            }
            argLine System.getProperty('tests.jvm.argline')

            // we use './temp' since this is per JVM and tests are forbidden from writing to CWD
            systemProperty 'java.io.tmpdir', './temp'
            systemProperty 'java.awt.headless', 'true'
            systemProperty 'tests.gradle', 'true'
            systemProperty 'tests.artifact', project.name
            systemProperty 'tests.task', path
            systemProperty 'tests.security.manager', 'true'
            systemProperty 'jna.nosys', 'true'
            systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion()
            if (project.ext.inFipsJvm) {
                systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS"
            } else {
                systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion()
            }
            // TODO: remove setting logging level via system property
            systemProperty 'tests.logger.level', 'WARN'
            for (Map.Entry<String, String> property : System.properties.entrySet()) {
                if (property.getKey().startsWith('tests.') ||
                        property.getKey().startsWith('es.')) {
                    if (property.getKey().equals('tests.seed')) {
                        /* The seed is already set on the project so we
                         * shouldn't attempt to override it. */
                        continue;
                    }
                    systemProperty property.getKey(), property.getValue()
            project.tasks.withType(Test) { Test test ->
                File testOutputDir = new File(test.reports.junitXml.getDestination(), "output")

                doFirst {
                    project.mkdir(testOutputDir)
                    project.mkdir(heapdumpDir)
                    project.mkdir(test.workingDir)
                }
            }

            // TODO: remove this once ctx isn't added to update script params in 7.0
            systemProperty 'es.scripting.update.ctx_in_params', 'false'
                def listener = new ErrorReportingTestListener(test.testLogging, testOutputDir)
                test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener)
                addTestOutputListener(listener)
                addTestListener(listener)

                // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
                if (project.inFipsJvm) {
                    systemProperty 'javax.net.ssl.trustStorePassword', 'password'
                    systemProperty 'javax.net.ssl.keyStorePassword', 'password'
                }
                executable = "${project.runtimeJavaHome}/bin/java"
                workingDir = project.file("${project.buildDir}/testrun/${test.name}")
                maxParallelForks = project.rootProject.ext.defaultParallel

                boolean assertionsEnabled = Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))
                enableSystemAssertions assertionsEnabled
                enableAssertions assertionsEnabled
                exclude '**/*$*.class'

            testLogging {
                showNumFailuresAtEnd 25
                slowTests {
                    heartbeat 10
                    summarySize 5
                jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}",
                        "-Xms${System.getProperty('tests.heap.size', '512m')}",
                        '-XX:+HeapDumpOnOutOfMemoryError',
                        "-XX:HeapDumpPath=$heapdumpDir"

                if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) {
                    jvmArgs '--illegal-access=warn'
                }
                stackTraceFilters {
                    // custom filters: we carefully only omit test infra noise here
                    contains '.SlaveMain.'
                    regex(/^(\s+at )(org\.junit\.)/)
                    // also includes anonymous classes inside these two:
                    regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.RandomizedRunner)/)
                    regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.ThreadLeakControl)/)
                    regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.rules\.)/)
                    regex(/^(\s+at )(org\.apache\.lucene\.util\.TestRule)/)
                    regex(/^(\s+at )(org\.apache\.lucene\.util\.AbstractBeforeAfterRule)/)

                if (System.getProperty('tests.jvm.argline')) {
                    jvmArgs System.getProperty('tests.jvm.argline').split(" ")
                }
                if (System.getProperty('tests.class') != null && System.getProperty('tests.output') == null) {
                    // if you are debugging, you want to see the output!
                    outputMode 'always'

                if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) {
                    jvmArgs '-ea', '-esa'
                }

                // we use './temp' since this is per JVM and tests are forbidden from writing to CWD
                systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent,
                        'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar",
                        'gradle.user.home': project.gradle.getGradleUserHomeDir(),
                        'java.io.tmpdir': './temp',
                        'java.awt.headless': 'true',
                        'tests.gradle': 'true',
                        'tests.artifact': project.name,
                        'tests.task': path,
                        'tests.security.manager': 'true',
                        'tests.seed': project.testSeed,
                        'jna.nosys': 'true',
                        'compiler.java': project.ext.compilerJavaVersion.getMajorVersion()

                if (project.ext.inFipsJvm) {
                    systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS"
                } else {
                    outputMode System.getProperty('tests.output', 'onerror')
                    systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion()
                }
                // TODO: remove setting logging level via system property
                systemProperty 'tests.logger.level', 'WARN'
                System.getProperties().each { key, value ->
                    if ((key.startsWith('tests.') || key.startsWith('es.'))) {
                        systemProperty key, value
                    }
                }
            }

            balancers {
                executionTime cacheFilename: ".local-${project.version}-${name}-execution-times.log"
            }
                // TODO: remove this once ctx isn't added to update script params in 7.0
                systemProperty 'es.scripting.update.ctx_in_params', 'false'

            listeners {
                junitReport()
            }
                // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
                if (project.inFipsJvm) {
                    systemProperty 'javax.net.ssl.trustStorePassword', 'password'
                    systemProperty 'javax.net.ssl.keyStorePassword', 'password'
                }

                exclude '**/*$*.class'
                testLogging {
                    showExceptions = true
                    showCauses = true
                    exceptionFormat = 'full'
                }

            project.plugins.withType(ShadowPlugin).whenPluginAdded {
                // Test against a shadow jar if we made one
                classpath -= project.tasks.compileJava.outputs.files
                classpath += project.tasks.shadowJar.outputs.files
                dependsOn project.tasks.shadowJar
                project.plugins.withType(ShadowPlugin).whenPluginAdded {
                    // Test against a shadow jar if we made one
                    classpath -= project.tasks.compileJava.outputs.files
                    classpath += project.tasks.shadowJar.outputs.files

                    dependsOn project.tasks.shadowJar
                }
            }
        }
    }

    private static String findDefaultParallel(Project project) {
    private static int findDefaultParallel(Project project) {
        if (project.file("/proc/cpuinfo").exists()) {
            // Count physical cores on any Linux distro ( don't count hyper-threading )
            Map<String, Integer> socketToCore = [:]

@@ -1046,7 +1027,7 @@ class BuildPlugin implements Plugin<Project> {
                }
            }
        })
        return socketToCore.values().sum().toString();
        return socketToCore.values().sum()
    } else if ('Mac OS X'.equals(System.getProperty('os.name'))) {
        // Ask macOS to count physical CPUs for us
        ByteArrayOutputStream stdout = new ByteArrayOutputStream()
@ -1055,16 +1036,9 @@ class BuildPlugin implements Plugin<Project> {
|
|||
args '-n', 'hw.physicalcpu'
|
||||
standardOutput = stdout
|
||||
}
|
||||
return stdout.toString('UTF-8').trim();
|
||||
}
|
||||
return 'auto';
|
||||
}
|
||||
|
||||
/** Configures the test task */
|
||||
static Task configureTest(Project project) {
|
||||
project.tasks.getByName('test') {
|
||||
include '**/*Tests.class'
|
||||
return Integer.parseInt(stdout.toString('UTF-8').trim())
|
||||
}
|
||||
return Runtime.getRuntime().availableProcessors() / 2
|
||||
}
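
For context, a minimal sketch of how a physical-core count can be derived from /proc/cpuinfo. The commit's actual parsing code is elided by the hunk above; this is an illustration only, pairing each "physical id" entry with its "cpu cores" value:

// Illustrative sketch, not the commit's exact code: /proc/cpuinfo repeats a
// "physical id" and "cpu cores" entry per logical CPU; keying core counts by
// socket id and summing de-duplicates hyper-threaded siblings.
Map<String, Integer> socketToCore = [:]
String currentSocket = null
new File('/proc/cpuinfo').eachLine { String line ->
    if (line.startsWith('physical id')) {
        currentSocket = line.split(':')[1].trim()
    } else if (line.startsWith('cpu cores') && currentSocket != null) {
        socketToCore[currentSocket] = line.split(':')[1].trim() as Integer
    }
}
int physicalCores = (socketToCore.values().sum() ?: 0) as int
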

    private static configurePrecommit(Project project) {

@@ -1094,4 +1068,58 @@ class BuildPlugin implements Plugin<Project> {
            deps.mappings = project.dependencyLicenses.mappings
        }
    }
    /**
     * Pins the test seed at configuration time so it isn't different on every
     * {@link Test} execution. This is useful if random decisions in one run of
     * {@linkplain Test} influence the outcome of subsequent runs. Pinning the
     * seed up front like this makes the reproduction line from one run useful
     * on another run.
     */
    static String setupSeed(Project project) {
        if (project.rootProject.ext.has('testSeed')) {
            /* Skip this if we've already pinned the testSeed. It is important
             * that this checks the rootProject so that we know we've only ever
             * initialized one time. */
            return project.rootProject.ext.testSeed
        }

        String testSeed = System.getProperty('tests.seed')
        if (testSeed == null) {
            long seed = new Random(System.currentTimeMillis()).nextLong()
            testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT)
        }

        project.rootProject.ext.testSeed = testSeed
        return testSeed
    }
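
A small self-contained sketch (illustration only) of why this encoding is convenient: the unsigned base-16, upper-case form round-trips losslessly, so a seed printed in one run can be passed back verbatim through the tests.seed system property read above:

// Same derivation as setupSeed; Long.parseUnsignedLong inverts toUnsignedString,
// and hex parsing is case-insensitive, so the upper-cased form still round-trips.
long seed = new Random(System.currentTimeMillis()).nextLong()
String testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT)
assert Long.parseUnsignedLong(testSeed, 16) == seed
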

    private static class TestFailureReportingPlugin implements Plugin<Project> {
        @Override
        void apply(Project project) {
            if (project != project.rootProject) {
                throw new IllegalStateException("${this.class.getName()} can only be applied to the root project.")
            }

            project.gradle.addListener(new TaskActionListener() {
                @Override
                void beforeActions(Task task) {

                }

                @Override
                void afterActions(Task task) {
                    if (task instanceof Test) {
                        ErrorReportingTestListener listener = task.extensions.findByType(ErrorReportingTestListener)
                        if (listener != null && listener.getFailedTests().size() > 0) {
                            task.logger.lifecycle("\nTests with failures:")
                            listener.getFailedTests().each {
                                task.logger.lifecycle(" - ${it.getFullName()}")
                            }
                        }
                    }
                }
            })
        }
    }
}

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.junit4
package org.elasticsearch.gradle

import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger

@@ -18,30 +18,36 @@
 */
package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.testclusters.ElasticsearchCluster
import org.elasticsearch.gradle.testclusters.TestClustersPlugin
import org.gradle.api.DefaultTask
import org.gradle.api.Task
import org.gradle.api.execution.TaskExecutionAdapter
import org.gradle.api.logging.Logger
import org.gradle.api.logging.Logging
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskState
import org.gradle.api.tasks.options.Option
import org.gradle.api.tasks.testing.Test
import org.gradle.plugins.ide.idea.IdeaPlugin
import org.gradle.process.CommandLineArgumentProvider

import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.util.stream.Stream

/**
 * A wrapper task around setting up a cluster and running rest tests.
 */
public class RestIntegTestTask extends DefaultTask {
class RestIntegTestTask extends DefaultTask {

    private static final Logger LOGGER = Logging.getLogger(RestIntegTestTask)

    protected ClusterConfiguration clusterConfig

    protected RandomizedTestingTask runner
    protected Test runner

    protected Task clusterInit


@@ -52,8 +58,8 @@ public class RestIntegTestTask extends DefaultTask {
    @Input
    Boolean includePackaged = false

    public RestIntegTestTask() {
        runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class)
    RestIntegTestTask() {
        runner = project.tasks.create("${name}Runner", Test.class)
        super.dependsOn(runner)
        clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses)
        runner.dependsOn(clusterInit)

@@ -71,35 +77,66 @@ public class RestIntegTestTask extends DefaultTask {
            runner.useCluster project.testClusters."$name"
        }

        // disable the build cache for rest test tasks
        // there are a number of inputs we aren't properly tracking here so we'll just not cache these for now
        runner.outputs.doNotCacheIf('Caching is disabled for REST integration tests') { true }

        // override/add more for rest tests
        runner.parallelism = '1'
        runner.maxParallelForks = 1
        runner.include('**/*IT.class')
        runner.systemProperty('tests.rest.load_packaged', 'false')

        /*
         * We use lazy-evaluated strings in order to configure system properties whose value will not be known until
         * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated
         * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due
         * to the GStrings containing references to non-serializable objects.
         *
         * We bypass this by instead passing these system properties via a CommandLineArgumentProvider. This has the added
         * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the
         * build cache key or up-to-date checking.
         */
        def nonInputProperties = new CommandLineArgumentProvider() {
            private final Map<String, Object> systemProperties = [:]

            void systemProperty(String key, Object value) {
                systemProperties.put(key, value)
            }

            @Override
            Iterable<String> asArguments() {
                return systemProperties.collect { key, value ->
                    "-D${key}=${value.toString()}".toString()
                }
            }
        }
        runner.jvmArgumentProviders.add(nonInputProperties)
        runner.ext.nonInputProperties = nonInputProperties
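
As a side note on the "${-> ... }" values used below: a Groovy GString with a closure value is re-evaluated each time toString() is called, which is what lets the provider render ports that only exist at execution time. A minimal standalone sketch of the pattern (property name and port are hypothetical):

import org.gradle.process.CommandLineArgumentProvider

// Sketch: the closure inside the GString runs when asArguments() renders it,
// so it observes the port assigned after the provider was configured.
int port = 0
def lazyValue = "${-> "localhost:${port}" }"
def provider = new CommandLineArgumentProvider() {
    final Map<String, Object> props = ['tests.rest.cluster': lazyValue]
    @Override
    Iterable<String> asArguments() {
        return props.collect { key, value -> "-D${key}=${value.toString()}".toString() }
    }
}
port = 9200
assert provider.asArguments().first() == '-Dtests.rest.cluster=localhost:9200'
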

        if (System.getProperty("tests.rest.cluster") == null) {
            if (System.getProperty("tests.cluster") != null) {
                throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null")
            }
            if (usesTestclusters == true) {
                ElasticsearchCluster cluster = project.testClusters."${name}"
                runner.systemProperty('tests.rest.cluster', {cluster.allHttpSocketURI.join(",") })
                runner.systemProperty('tests.config.dir', {cluster.singleNode().getConfigDir()})
                runner.systemProperty('tests.cluster', {cluster.transportPortURI})
                nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }")
                nonInputProperties.systemProperty('tests.config.dir', "${-> cluster.singleNode().getConfigDir() }")
                nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }")
            } else {
                // we pass all nodes to the rest cluster to allow the clients to round-robin between them
                // this is more realistic than just talking to a single node
                runner.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}")
                runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}")
                nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}")
                nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}")
                // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
                // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
                // both as separate sysprops
                runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
                nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")

                // dump errors and warnings from cluster log on failure
                TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
                    @Override
                    void afterExecute(Task task, TaskState state) {
                        if (state.failure != null) {
                        if (task == runner && state.failure != null) {
                            for (NodeInfo nodeInfo : nodes) {
                                printLogExcerpt(nodeInfo)
                            }

@@ -194,9 +231,9 @@ public class RestIntegTestTask extends DefaultTask {
    /** Print out an excerpt of the log from the given node. */
    protected static void printLogExcerpt(NodeInfo nodeInfo) {
        File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log")
        println("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:")
        println("(full log at ${logFile})")
        println('-----------------------------------------')
        LOGGER.lifecycle("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:")
        LOGGER.lifecycle("(full log at ${logFile})")
        LOGGER.lifecycle('-----------------------------------------')
        Stream<String> stream = Files.lines(logFile.toPath(), StandardCharsets.UTF_8)
        try {
            boolean inStartup = true

@@ -211,9 +248,9 @@ public class RestIntegTestTask extends DefaultTask {
                }
                if (inStartup || inExcerpt) {
                    if (linesSkipped != 0) {
                        println("... SKIPPED ${linesSkipped} LINES ...")
                        LOGGER.lifecycle("... SKIPPED ${linesSkipped} LINES ...")
                    }
                    println(line)
                    LOGGER.lifecycle(line)
                    linesSkipped = 0
                } else {
                    ++linesSkipped

@@ -225,7 +262,7 @@ public class RestIntegTestTask extends DefaultTask {
        } finally {
            stream.close()
        }
        println('=========================================')
        LOGGER.lifecycle('=========================================')

    }

@@ -20,7 +20,8 @@

package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin

import groovy.transform.CompileStatic
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
import org.elasticsearch.gradle.VersionProperties

@@ -28,48 +29,66 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.artifacts.Configuration
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.plugins.JavaPlugin
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.SourceSetContainer
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.api.tasks.testing.Test
import org.gradle.plugins.ide.eclipse.model.EclipseModel
import org.gradle.plugins.ide.idea.model.IdeaModel

/**
 * Configures the build to compile tests against Elasticsearch's test framework
 * and run REST tests. Use BuildPlugin if you want to build main code as well
 * as tests.
 */
public class StandaloneRestTestPlugin implements Plugin<Project> {
@CompileStatic
class StandaloneRestTestPlugin implements Plugin<Project> {

    @Override
    public void apply(Project project) {
    void apply(Project project) {
        if (project.pluginManager.hasPlugin('elasticsearch.build')) {
            throw new InvalidUserDataException('elasticsearch.standalone-test, '
                + 'elasticsearch.standalone-rest-test, and elasticsearch.build '
                + 'are mutually exclusive')
        }
        project.pluginManager.apply(JavaBasePlugin)
        project.pluginManager.apply(RandomizedTestingPlugin)

        project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask)
        BuildPlugin.globalBuildInfo(project)
        BuildPlugin.configureRepositories(project)
        BuildPlugin.applyCommonTestConfig(project)
        BuildPlugin.configureTestTasks(project)

        // only setup tests to build
        project.sourceSets.create('test')
        SourceSetContainer sourceSets = project.extensions.getByType(SourceSetContainer)
        SourceSet testSourceSet = sourceSets.create('test')

        project.tasks.withType(Test) { Test test ->
            test.testClassesDirs = testSourceSet.output.classesDirs
            test.classpath = testSourceSet.runtimeClasspath
        }

        // create a compileOnly configuration as others might expect it
        project.configurations.create("compileOnly")
        project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")

        project.eclipse.classpath.sourceSets = [project.sourceSets.test]
        project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
        project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs
        project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]]
        EclipseModel eclipse = project.extensions.getByType(EclipseModel)
        eclipse.classpath.sourceSets = [testSourceSet]
        eclipse.classpath.plusConfigurations = [project.configurations.getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME)]

        IdeaModel idea = project.extensions.getByType(IdeaModel)
        idea.module.testSourceDirs += testSourceSet.java.srcDirs
        idea.module.scopes.put('TEST', [plus: [project.configurations.getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME)]] as Map<String, Collection<Configuration>>)

        PrecommitTasks.create(project, false)
        project.check.dependsOn(project.precommit)
        project.tasks.getByName('check').dependsOn(project.tasks.getByName('precommit'))

        project.tasks.withType(JavaCompile) {
        project.tasks.withType(JavaCompile) { JavaCompile task ->
            // This will be the default in Gradle 5.0
            if (options.compilerArgs.contains("-processor") == false) {
                options.compilerArgs << '-proc:none'
            if (task.options.compilerArgs.contains("-processor") == false) {
                task.options.compilerArgs << '-proc:none'
            }
        }
    }

@@ -19,34 +19,30 @@

package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import groovy.transform.CompileStatic
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.tasks.testing.Test

/**
 * Configures the build to compile against Elasticsearch's test framework and
 * run integration and unit tests. Use BuildPlugin if you want to build main
 * code as well as tests. */
public class StandaloneTestPlugin implements Plugin<Project> {
@CompileStatic
class StandaloneTestPlugin implements Plugin<Project> {

    @Override
    public void apply(Project project) {
    void apply(Project project) {
        project.pluginManager.apply(StandaloneRestTestPlugin)

        Map testOptions = [
            name: 'test',
            type: RandomizedTestingTask,
            dependsOn: 'testClasses',
            group: JavaBasePlugin.VERIFICATION_GROUP,
            description: 'Runs unit tests that are separate'
        ]
        RandomizedTestingTask test = project.tasks.create(testOptions)
        Test test = project.tasks.create('test', Test)
        test.group = JavaBasePlugin.VERIFICATION_GROUP
        test.description = 'Runs unit tests that are separate'

        BuildPlugin.configureCompile(project)
        test.classpath = project.sourceSets.test.runtimeClasspath
        test.testClassesDirs = project.sourceSets.test.output.classesDirs
        test.mustRunAfter(project.precommit)
        project.check.dependsOn(test)
        test.mustRunAfter(project.tasks.getByName('precommit'))
        project.tasks.getByName('check').dependsOn(test)
    }
}

@@ -18,7 +18,7 @@
 */
package org.elasticsearch.gradle.vagrant

import com.carrotsearch.gradle.junit4.LoggingOutputStream
import org.elasticsearch.gradle.LoggingOutputStream
import org.gradle.api.GradleScriptException
import org.gradle.api.logging.Logger
import org.gradle.internal.logging.progress.ProgressLogger

@@ -18,7 +18,7 @@
 */
package org.elasticsearch.gradle.vagrant

import com.carrotsearch.gradle.junit4.LoggingOutputStream
import org.elasticsearch.gradle.LoggingOutputStream
import org.gradle.internal.logging.progress.ProgressLogger

/**

@@ -31,12 +31,10 @@ import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.testing.Test;
import org.gradle.api.tasks.util.PatternFilterable;

import java.io.File;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.MalformedURLException;

@@ -75,17 +73,6 @@ public class TestingConventionsTasks extends DefaultTask {
    public Map<String, Set<File>> classFilesPerEnabledTask(FileTree testClassFiles) {
        Map<String, Set<File>> collector = new HashMap<>();

        // RandomizedTestingTask
        collector.putAll(
            getProject().getTasks().withType(getRandomizedTestingTask()).stream()
                .filter(Task::getEnabled)
                .collect(Collectors.toMap(
                    Task::getPath,
                    task -> testClassFiles.matching(getRandomizedTestingPatternSet(task)).getFiles()
                    )
                )
        );

        // Gradle Test
        collector.putAll(
            getProject().getTasks().withType(Test.class).stream()

@@ -279,32 +266,6 @@ public class TestingConventionsTasks extends DefaultTask {
            .collect(Collectors.joining("\n"));
    }

    @SuppressWarnings("unchecked")
    private PatternFilterable getRandomizedTestingPatternSet(Task task) {
        try {
            if (
                getRandomizedTestingTask().isAssignableFrom(task.getClass()) == false
            ) {
                throw new IllegalStateException("Expected " + task + " to be RandomizedTestingTask or Test but it was " + task.getClass());
            }
            Method getPatternSet = task.getClass().getMethod("getPatternSet");
            return (PatternFilterable) getPatternSet.invoke(task);
        } catch (NoSuchMethodException e) {
            throw new IllegalStateException("Expected task to have a `patternSet` method: " + task, e);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new IllegalStateException("Failed to get pattern set from task " + task, e);
        }
    }

    @SuppressWarnings("unchecked")
    private Class<? extends Task> getRandomizedTestingTask() {
        try {
            return (Class<? extends Task>) Class.forName("com.carrotsearch.gradle.junit4.RandomizedTestingTask");
        } catch (ClassNotFoundException | ClassCastException e) {
            throw new IllegalStateException("Failed to load randomized testing class", e);
        }
    }

    private String checkNoneExists(String message, Stream<? extends Class<?>> stream) {
        String problem = stream
            .map(each -> " * " + each.getName())

@@ -0,0 +1,266 @@
package org.elasticsearch.gradle.test;

import org.gradle.api.internal.tasks.testing.logging.FullExceptionFormatter;
import org.gradle.api.internal.tasks.testing.logging.TestExceptionFormatter;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.tasks.testing.TestDescriptor;
import org.gradle.api.tasks.testing.TestListener;
import org.gradle.api.tasks.testing.TestOutputEvent;
import org.gradle.api.tasks.testing.TestOutputListener;
import org.gradle.api.tasks.testing.TestResult;
import org.gradle.api.tasks.testing.logging.TestLogging;

import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.UncheckedIOException;
import java.io.Writer;
import java.util.Deque;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ErrorReportingTestListener implements TestOutputListener, TestListener {
    private static final Logger LOGGER = Logging.getLogger(ErrorReportingTestListener.class);
    private static final String REPRODUCE_WITH_PREFIX = "REPRODUCE WITH";

    private final TestExceptionFormatter formatter;
    private final File outputDirectory;
    private Map<Descriptor, EventWriter> eventWriters = new ConcurrentHashMap<>();
    private Map<Descriptor, Deque<String>> reproductionLines = new ConcurrentHashMap<>();
    private Set<Descriptor> failedTests = new LinkedHashSet<>();

    public ErrorReportingTestListener(TestLogging testLogging, File outputDirectory) {
        this.formatter = new FullExceptionFormatter(testLogging);
        this.outputDirectory = outputDirectory;
    }

    @Override
    public void onOutput(TestDescriptor testDescriptor, TestOutputEvent outputEvent) {
        TestDescriptor suite = testDescriptor.getParent();

        // Check if this is output from the test suite itself (e.g. afterTest or beforeTest)
        if (testDescriptor.isComposite()) {
            suite = testDescriptor;
        }

        // Hold on to any repro messages so we can report them immediately on test case failure
        if (outputEvent.getMessage().startsWith(REPRODUCE_WITH_PREFIX)) {
            Deque<String> lines = reproductionLines.computeIfAbsent(Descriptor.of(suite), d -> new LinkedList<>());
            lines.add(outputEvent.getMessage());
        }

        EventWriter eventWriter = eventWriters.computeIfAbsent(Descriptor.of(suite), EventWriter::new);
        eventWriter.write(outputEvent);
    }

    @Override
    public void beforeSuite(TestDescriptor suite) {

    }

    @Override
    public void afterSuite(final TestDescriptor suite, TestResult result) {
        Descriptor descriptor = Descriptor.of(suite);

        try {
            // if the test suite failed, report all captured output
            if (result.getResultType().equals(TestResult.ResultType.FAILURE)) {
                EventWriter eventWriter = eventWriters.get(descriptor);

                if (eventWriter != null) {
                    // It's not explicit what the threading guarantees are for TestListener method execution so we'll
                    // be explicitly safe here to avoid interleaving output from multiple test suites
                    synchronized (this) {
                        // make sure we've flushed everything to disk before reading
                        eventWriter.flush();

                        System.err.println("\n\nSuite: " + suite);

                        try (BufferedReader reader = eventWriter.reader()) {
                            PrintStream out = System.out;
                            for (String message = reader.readLine(); message != null; message = reader.readLine()) {
                                if (message.startsWith("  1> ")) {
                                    out = System.out;
                                } else if (message.startsWith("  2> ")) {
                                    out = System.err;
                                }

                                out.println(message);
                            }
                        }
                    }
                }
            }
        } catch (IOException e) {
            throw new UncheckedIOException("Error reading test suite output", e);
        } finally {
            reproductionLines.remove(descriptor);
            EventWriter writer = eventWriters.remove(descriptor);
            if (writer != null) {
                try {
                    writer.close();
                } catch (IOException e) {
                    LOGGER.error("Failed to close test suite output stream", e);
                }
            }
        }
    }

    @Override
    public void beforeTest(TestDescriptor testDescriptor) {

    }

    @Override
    public void afterTest(TestDescriptor testDescriptor, TestResult result) {
        if (result.getResultType() == TestResult.ResultType.FAILURE) {
            failedTests.add(Descriptor.of(testDescriptor));

            if (testDescriptor.getParent() != null) {
                // go back and fetch the reproduction line for this test failure
                Deque<String> lines = reproductionLines.get(Descriptor.of(testDescriptor.getParent()));
                if (lines != null) {
                    String line = lines.getLast();
                    if (line != null) {
                        System.err.print('\n' + line);
                    }
                }

                // include test failure exception stacktraces in test suite output log
                if (result.getExceptions().size() > 0) {
                    String message = formatter.format(testDescriptor, result.getExceptions()).substring(4);
                    EventWriter eventWriter = eventWriters.computeIfAbsent(Descriptor.of(testDescriptor.getParent()), EventWriter::new);

                    eventWriter.write(new TestOutputEvent() {
                        @Override
                        public Destination getDestination() {
                            return Destination.StdErr;
                        }

                        @Override
                        public String getMessage() {
                            return message;
                        }
                    });
                }
            }
        }
    }

    public Set<Descriptor> getFailedTests() {
        return failedTests;
    }

    /**
     * Class for identifying test output sources. We use this rather than Gradle's {@link TestDescriptor} as we want
     * to avoid any nasty memory leak issues that come from keeping Gradle implementation types in memory. Since we
     * use this as the key for our HashMap, it's best to control the implementation as there's no guarantee that Gradle's
     * various {@link TestDescriptor} implementations reliably implement equals and hashCode.
     */
    public static class Descriptor {
        private final String name;
        private final String className;
        private final String parent;

        private Descriptor(String name, String className, String parent) {
            this.name = name;
            this.className = className;
            this.parent = parent;
        }

        public static Descriptor of(TestDescriptor d) {
            return new Descriptor(d.getName(), d.getClassName(), d.getParent() == null ? null : d.getParent().toString());
        }

        public String getClassName() {
            return className;
        }

        public String getFullName() {
            return className + "." + name;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Descriptor that = (Descriptor) o;
            return Objects.equals(name, that.name) &&
                Objects.equals(className, that.className) &&
                Objects.equals(parent, that.parent);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, className, parent);
        }
    }

    private class EventWriter implements Closeable {
        private final File outputFile;
        private final Writer writer;

        EventWriter(Descriptor descriptor) {
            this.outputFile = new File(outputDirectory, descriptor.getClassName() + ".out");

            FileOutputStream fos;
            try {
                fos = new FileOutputStream(this.outputFile);
            } catch (IOException e) {
                throw new UncheckedIOException("Unable to create test suite output file", e);
            }

            this.writer = new PrintWriter(new BufferedOutputStream(fos));
        }

        public void write(TestOutputEvent event) {
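            // Tag each captured line with its origin stream; afterSuite() above keys off
            // these "  1> " (stdout) and "  2> " (stderr) prefixes when replaying output.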
            String prefix;
            if (event.getDestination() == TestOutputEvent.Destination.StdOut) {
                prefix = "  1> ";
            } else {
                prefix = "  2> ";
            }

            try {
                if (event.getMessage().equals("\n")) {
                    writer.write(event.getMessage());
                } else {
                    writer.write(prefix + event.getMessage());
                }
            } catch (IOException e) {
                throw new UncheckedIOException("Unable to write test suite output", e);
            }
        }

        public void flush() throws IOException {
            writer.flush();
        }

        public BufferedReader reader() {
            try {
                return new BufferedReader(new FileReader(outputFile));
            } catch (IOException e) {
                throw new UncheckedIOException("Unable to read test suite output file", e);
            }
        }

        @Override
        public void close() throws IOException {
            writer.close();

            // there's no need to keep this stuff on disk after suite execution
            outputFile.delete();
        }
    }
}
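
For context, a sketch of how this listener would typically be wired to a Test task (assumed wiring; the registration code is not part of this diff). Exposing the listener through the task's extension container is what makes the task.extensions.findByType(ErrorReportingTestListener) lookup in TestFailureReportingPlugin above work:

import org.gradle.api.tasks.testing.Test

// Assumed wiring sketch: capture per-suite output under the build directory
// and register the listener for both output events and test results.
project.tasks.withType(Test) { Test task ->
    File testOutputDir = new File(task.project.buildDir, "testrun/${task.name}/output")
    def listener = new ErrorReportingTestListener(task.testLogging, testOutputDir)
    task.doFirst { testOutputDir.mkdirs() }
    task.extensions.add('errorReportingTestListener', listener)
    task.addTestOutputListener(listener)
    task.addTestListener(listener)
}
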

@@ -32,9 +32,8 @@ import org.gradle.api.Task;
import org.gradle.api.plugins.BasePlugin;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.tasks.TaskContainer;
import org.gradle.api.tasks.testing.Test;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.function.BiConsumer;


@@ -103,7 +102,7 @@ public class TestFixturesPlugin implements Plugin<Project> {
                .matching(fixtureProject -> fixtureProject.equals(project) == false)
                .all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath()));

        conditionTaskByType(tasks, extension, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"));
        conditionTaskByType(tasks, extension, Test.class);
        conditionTaskByType(tasks, extension, getTaskClass("org.elasticsearch.gradle.test.RestIntegTestTask"));
        conditionTaskByType(tasks, extension, TestingConventionsTasks.class);
        conditionTaskByType(tasks, extension, ComposeUp.class);

@@ -116,18 +115,14 @@ public class TestFixturesPlugin implements Plugin<Project> {
            return;
        }

        tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task ->
        tasks.withType(Test.class, task ->
            extension.fixtures.all(fixtureProject -> {
                fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(buildFixture ->
                    task.dependsOn(buildFixture)
                );
                fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(composeDown ->
                    task.finalizedBy(composeDown)
                );
                fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(task::dependsOn);
                fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(task::finalizedBy);
                configureServiceInfoForTask(
                    task,
                    fixtureProject,
                    (name, port) -> setSystemProperty(task, name, port)
                    task::systemProperty
                );
                task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture"));
            })

@@ -182,17 +177,6 @@ public class TestFixturesPlugin implements Plugin<Project> {
        return hasDockerCompose && Boolean.parseBoolean(System.getProperty("tests.fixture.enabled", "true"));
    }

    private void setSystemProperty(Task task, String name, Object value) {
        try {
            Method systemProperty = task.getClass().getMethod("systemProperty", String.class, Object.class);
            systemProperty.invoke(task, name, value);
        } catch (NoSuchMethodException e) {
            throw new IllegalArgumentException("Could not find systemProperty method on RandomizedTestingTask", e);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new IllegalArgumentException("Could not call systemProperty method on RandomizedTestingTask", e);
        }
    }

    private void disableTaskByType(TaskContainer tasks, Class<? extends Task> type) {
        tasks.withType(type, task -> task.setEnabled(false));
    }

@@ -1 +0,0 @@
implementation-class=com.carrotsearch.gradle.junit4.RandomizedTestingPlugin

@@ -62,7 +62,7 @@ public class TestingConventionsTasksIT extends GradleIntegrationTestCase {
        BuildResult result = runner.buildAndFail();
        assertOutputContains(result.getOutput(),
            "Expected at least one test class included in task :empty_test_task:emptyTest, but found none.",
            "Expected at least one test class included in task :empty_test_task:emptyTestRandomized, but found none."
            "Expected at least one test class included in task :empty_test_task:test, but found none."
        );
    }


@@ -71,9 +71,8 @@ public class TestingConventionsTasksIT extends GradleIntegrationTestCase {
            .withArguments("clean", ":all_classes_in_tasks:testingConventions", "-i", "-s");
        BuildResult result = runner.buildAndFail();
        assertOutputContains(result.getOutput(),
            "Test classes are not included in any enabled task (:all_classes_in_tasks:emptyTestRandomized):",
            "  * org.elasticsearch.gradle.testkit.NamingConventionIT",
            "  * org.elasticsearch.gradle.testkit.NamingConventionTests"
            "Test classes are not included in any enabled task (:all_classes_in_tasks:test):",
            "  * org.elasticsearch.gradle.testkit.NamingConventionIT"
        );
    }

@@ -27,7 +27,7 @@ forbiddenApisTest.enabled = false
// requires dependency on testing fw
jarHell.enabled = false
// we don't have tests for now
unitTest.enabled = false
test.enabled = false

task hello {
    doFirst {

@@ -25,22 +25,16 @@ allprojects {
            baseClasses = []
        }
    }

    unitTest.enabled = false
}

project(':empty_test_task') {
    task emptyTest(type: Test) {

    }

    task emptyTestRandomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) {

    }
}

project(':all_classes_in_tasks') {
    task emptyTestRandomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) {
    test {
        include "**/Convention*"
    }
}

@@ -54,14 +48,14 @@ project(':not_implementing_base') {
            baseClass 'org.elasticsearch.gradle.testkit.Integration'
        }
    }
    task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) {
    test {
        include "**/*IT.class"
        include "**/*Tests.class"
    }
}

project(':valid_setup_no_base') {
    task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) {
    test {
        include "**/*IT.class"
        include "**/*Tests.class"
    }

@@ -72,7 +66,7 @@ project(':tests_in_main') {
}

project (':valid_setup_with_base') {
    task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) {
    test {
        include "**/*IT.class"
        include "**/*Tests.class"
    }

@@ -29,7 +29,7 @@ archivesBaseName = 'client-benchmarks'
mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain'

// never try to invoke tests on the benchmark project - there aren't any
unitTest.enabled = false
test.enabled = false

dependencies {
    compile 'org.apache.commons:commons-math3:3.2'

@@ -36,5 +36,5 @@ dependenciesInfo.enabled = false
compileJava.options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked"

// no unit tests
unitTest.enabled = false
test.enabled = false
integTest.enabled = false

@@ -64,6 +64,9 @@ dependencies {
    testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
    // this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs
    testCompile "org.elasticsearch:rest-api-spec:${version}"
    // Needed for serialization tests:
    // (In order to serialize a server side class to a client side class or the other way around)
    testCompile "org.elasticsearch.plugin:x-pack-core:${version}"

    restSpec "org.elasticsearch:rest-api-spec:${version}"
}

@@ -0,0 +1,93 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

public class DataFrameTransformCheckpointStats {
    public static final ParseField TIMESTAMP_MILLIS = new ParseField("timestamp_millis");
    public static final ParseField TIME_UPPER_BOUND_MILLIS = new ParseField("time_upper_bound_millis");

    public static DataFrameTransformCheckpointStats EMPTY = new DataFrameTransformCheckpointStats(0L, 0L);

    private final long timestampMillis;
    private final long timeUpperBoundMillis;

    public static final ConstructingObjectParser<DataFrameTransformCheckpointStats, Void> LENIENT_PARSER = new ConstructingObjectParser<>(
            "data_frame_transform_checkpoint_stats", true, args -> {
        long timestamp = args[0] == null ? 0L : (Long) args[0];
        long timeUpperBound = args[1] == null ? 0L : (Long) args[1];

        return new DataFrameTransformCheckpointStats(timestamp, timeUpperBound);
    });

    static {
        LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), TIMESTAMP_MILLIS);
        LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), TIME_UPPER_BOUND_MILLIS);
    }

    public static DataFrameTransformCheckpointStats fromXContent(XContentParser parser) throws IOException {
        return LENIENT_PARSER.parse(parser, null);
    }

    public DataFrameTransformCheckpointStats(final long timestampMillis, final long timeUpperBoundMillis) {
        this.timestampMillis = timestampMillis;
        this.timeUpperBoundMillis = timeUpperBoundMillis;
    }

    public DataFrameTransformCheckpointStats(StreamInput in) throws IOException {
        this.timestampMillis = in.readLong();
        this.timeUpperBoundMillis = in.readLong();
    }

    public long getTimestampMillis() {
        return timestampMillis;
    }

    public long getTimeUpperBoundMillis() {
        return timeUpperBoundMillis;
    }

    @Override
    public int hashCode() {
        return Objects.hash(timestampMillis, timeUpperBoundMillis);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        DataFrameTransformCheckpointStats that = (DataFrameTransformCheckpointStats) other;

        return this.timestampMillis == that.timestampMillis && this.timeUpperBoundMillis == that.timeUpperBoundMillis;
    }
}
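
As an illustration of the lenient parsing above (hypothetical field values, written here as a Groovy sketch; the assertions follow from the constructor-args lambda, where missing longs default to 0L):

import org.elasticsearch.common.xcontent.LoggingDeprecationHandler
import org.elasticsearch.common.xcontent.NamedXContentRegistry
import org.elasticsearch.common.xcontent.json.JsonXContent

// Sketch: unknown fields are ignored (the parser is constructed lenient) and a
// missing time_upper_bound_millis falls back to 0L.
def parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
        LoggingDeprecationHandler.INSTANCE,
        '{"timestamp_millis": 1546300800000, "some_future_field": true}')
def stats = DataFrameTransformCheckpointStats.fromXContent(parser)
assert stats.timestampMillis == 1546300800000L
assert stats.timeUpperBoundMillis == 0L
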

@@ -0,0 +1,102 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.util.Objects;

public class DataFrameTransformCheckpointingInfo {

    public static final ParseField CURRENT_CHECKPOINT = new ParseField("current");
    public static final ParseField IN_PROGRESS_CHECKPOINT = new ParseField("in_progress");
    public static final ParseField OPERATIONS_BEHIND = new ParseField("operations_behind");

    private final DataFrameTransformCheckpointStats current;
    private final DataFrameTransformCheckpointStats inProgress;
    private final long operationsBehind;


    private static final ConstructingObjectParser<DataFrameTransformCheckpointingInfo, Void> LENIENT_PARSER =
            new ConstructingObjectParser<>(
                    "data_frame_transform_checkpointing_info", true, a -> {
                long behind = a[2] == null ? 0L : (Long) a[2];

                return new DataFrameTransformCheckpointingInfo(
                        a[0] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[0],
                        a[1] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[1], behind);
            });

    static {
        LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> DataFrameTransformCheckpointStats.fromXContent(p), CURRENT_CHECKPOINT);
        LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> DataFrameTransformCheckpointStats.fromXContent(p), IN_PROGRESS_CHECKPOINT);
        LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), OPERATIONS_BEHIND);
    }

    public DataFrameTransformCheckpointingInfo(DataFrameTransformCheckpointStats current, DataFrameTransformCheckpointStats inProgress,
                                               long operationsBehind) {
        this.current = Objects.requireNonNull(current);
        this.inProgress = Objects.requireNonNull(inProgress);
        this.operationsBehind = operationsBehind;
    }

    public DataFrameTransformCheckpointStats getCurrent() {
        return current;
    }

    public DataFrameTransformCheckpointStats getInProgress() {
        return inProgress;
    }

    public long getOperationsBehind() {
        return operationsBehind;
    }

    public static DataFrameTransformCheckpointingInfo fromXContent(XContentParser p) {
        return LENIENT_PARSER.apply(p, null);
    }

    @Override
    public int hashCode() {
        return Objects.hash(current, inProgress, operationsBehind);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        DataFrameTransformCheckpointingInfo that = (DataFrameTransformCheckpointingInfo) other;

        return Objects.equals(this.current, that.current) &&
            Objects.equals(this.inProgress, that.inProgress) &&
            this.operationsBehind == that.operationsBehind;
    }

}

@@ -42,7 +42,7 @@ public class DataFrameTransformState {
    private static final ParseField INDEXER_STATE = new ParseField("indexer_state");
    private static final ParseField TASK_STATE = new ParseField("task_state");
    private static final ParseField CURRENT_POSITION = new ParseField("current_position");
    private static final ParseField GENERATION = new ParseField("generation");
    private static final ParseField CHECKPOINT = new ParseField("checkpoint");
    private static final ParseField REASON = new ParseField("reason");

    @SuppressWarnings("unchecked")

@@ -69,7 +69,7 @@ public class DataFrameTransformState {
            }
            throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
        }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY);
        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), GENERATION);
        PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT);
        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON);
    }


@@ -79,19 +79,19 @@ public class DataFrameTransformState {

    private final DataFrameTransformTaskState taskState;
    private final IndexerState indexerState;
    private final long generation;
    private final long checkpoint;
    private final SortedMap<String, Object> currentPosition;
    private final String reason;

    public DataFrameTransformState(DataFrameTransformTaskState taskState,
                                   IndexerState indexerState,
                                   @Nullable Map<String, Object> position,
                                   long generation,
                                   long checkpoint,
                                   @Nullable String reason) {
        this.taskState = taskState;
        this.indexerState = indexerState;
        this.currentPosition = position == null ? null : Collections.unmodifiableSortedMap(new TreeMap<>(position));
        this.generation = generation;
        this.checkpoint = checkpoint;
        this.reason = reason;
    }


@@ -108,8 +108,8 @@ public class DataFrameTransformState {
        return currentPosition;
    }

    public long getGeneration() {
        return generation;
    public long getCheckpoint() {
        return checkpoint;
    }

    @Nullable

@@ -132,13 +132,13 @@ public class DataFrameTransformState {
        return Objects.equals(this.taskState, that.taskState) &&
            Objects.equals(this.indexerState, that.indexerState) &&
            Objects.equals(this.currentPosition, that.currentPosition) &&
            this.generation == that.generation &&
            this.checkpoint == that.checkpoint &&
            Objects.equals(this.reason, that.reason);
    }

    @Override
    public int hashCode() {
        return Objects.hash(taskState, indexerState, currentPosition, generation, reason);
        return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason);
    }

}

@@ -31,16 +31,20 @@ public class DataFrameTransformStateAndStats {
    public static final ParseField ID = new ParseField("id");
    public static final ParseField STATE_FIELD = new ParseField("state");
    public static final ParseField STATS_FIELD = new ParseField("stats");
    public static final ParseField CHECKPOINTING_INFO_FIELD = new ParseField("checkpointing");

    public static final ConstructingObjectParser<DataFrameTransformStateAndStats, Void> PARSER = new ConstructingObjectParser<>(
            "data_frame_transform_state_and_stats", true,
            a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], (DataFrameIndexerTransformStats) a[2]));
            a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], (DataFrameIndexerTransformStats) a[2],
                (DataFrameTransformCheckpointingInfo) a[3]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), ID);
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataFrameTransformState.PARSER::apply, STATE_FIELD);
        PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p),
            STATS_FIELD);
        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> DataFrameTransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD);
    }

    public static DataFrameTransformStateAndStats fromXContent(XContentParser parser) throws IOException {

@@ -50,11 +54,14 @@ public class DataFrameTransformStateAndStats {
    private final String id;
    private final DataFrameTransformState transformState;
    private final DataFrameIndexerTransformStats transformStats;
    private final DataFrameTransformCheckpointingInfo checkpointingInfo;

    public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats) {
    public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats,
                                           DataFrameTransformCheckpointingInfo checkpointingInfo) {
        this.id = id;
        this.transformState = state;
        this.transformStats = stats;
        this.checkpointingInfo = checkpointingInfo;
    }

    public String getId() {

@@ -69,9 +76,13 @@ public class DataFrameTransformStateAndStats {
        return transformState;
    }

    public DataFrameTransformCheckpointingInfo getCheckpointingInfo() {
        return checkpointingInfo;
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, transformState, transformStats);
        return Objects.hash(id, transformState, transformStats, checkpointingInfo);
    }

    @Override

@@ -87,6 +98,7 @@ public class DataFrameTransformStateAndStats {
        DataFrameTransformStateAndStats that = (DataFrameTransformStateAndStats) other;

        return Objects.equals(this.id, that.id) && Objects.equals(this.transformState, that.transformState)
            && Objects.equals(this.transformStats, that.transformStats);
            && Objects.equals(this.transformStats, that.transformStats)
            && Objects.equals(this.checkpointingInfo, that.checkpointingInfo);
    }
}

@@ -1,9 +1,22 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol;
package org.elasticsearch.client;

import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;

@@ -14,6 +27,11 @@ import java.io.IOException;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;

/**
 * @deprecated Use {@link AbstractResponseTestCase} instead of this class.
 */
// TODO: Remove and change subclasses to use AbstractResponseTestCase instead
@Deprecated
public abstract class AbstractHlrcStreamableXContentTestCase<T extends ToXContent & Streamable, H>
        extends AbstractStreamableXContentTestCase<T> {

@@ -1,9 +1,22 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol;
package org.elasticsearch.client;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -11,6 +24,11 @@ import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;

/**
 * @deprecated Use {@link AbstractResponseTestCase} instead of this class.
 */
// TODO: Remove and change subclasses to use AbstractResponseTestCase instead
@Deprecated
public abstract class AbstractHlrcXContentTestCase<T extends ToXContent, H> extends AbstractXContentTestCase<T> {

    /**

@@ -0,0 +1,65 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

/**
|
||||
* Base class for HLRC request parsing tests.
|
||||
*
|
||||
* This case class facilitates generating client side request test instances and
|
||||
* verifies that they are correctly parsed into server side request instances.
|
||||
*
|
||||
* @param <C> The class representing the request on the client side.
|
||||
* @param <S> The class representing the request on the server side.
|
||||
*/
|
||||
public abstract class AbstractRequestTestCase<C extends ToXContent, S> extends ESTestCase {
|
||||
|
||||
public final void testFromXContent() throws IOException {
|
||||
final C clientTestInstance = createClientTestInstance();
|
||||
|
||||
final XContentType xContentType = randomFrom(XContentType.values());
|
||||
final BytesReference bytes = toShuffledXContent(clientTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
|
||||
|
||||
final XContent xContent = XContentFactory.xContent(xContentType);
|
||||
final XContentParser parser = xContent.createParser(
|
||||
NamedXContentRegistry.EMPTY,
|
||||
LoggingDeprecationHandler.INSTANCE,
|
||||
bytes.streamInput());
|
||||
final S serverInstance = doParseToServerInstance(parser);
|
||||
assertInstances(serverInstance, clientTestInstance);
|
||||
}
|
||||
|
||||
protected abstract C createClientTestInstance();
|
||||
|
||||
protected abstract S doParseToServerInstance(XContentParser parser) throws IOException;
|
||||
|
||||
protected abstract void assertInstances(S serverInstance, C clientTestInstance);
|
||||
|
||||
}
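
For illustration only (not part of the diff): a typical subclass supplies the three hooks above, generating a random client-side request and asserting against the server-side request parsed from its x-content. A minimal sketch, where PutFooRequest and FooAction.Request are hypothetical client- and server-side types, not classes from this commit:

    // Hypothetical sketch of an AbstractRequestTestCase subclass; the Foo types
    // and their fromXContent/getName methods are assumed for illustration.
    public class PutFooRequestTests extends AbstractRequestTestCase<PutFooRequest, FooAction.Request> {

        @Override
        protected PutFooRequest createClientTestInstance() {
            // random client-side request instance that the harness serializes
            return new PutFooRequest(randomAlphaOfLength(4));
        }

        @Override
        protected FooAction.Request doParseToServerInstance(XContentParser parser) throws IOException {
            // server-side parsing of the client-produced x-content is what is under test
            return FooAction.Request.fromXContent(parser);
        }

        @Override
        protected void assertInstances(FooAction.Request serverInstance, PutFooRequest clientTestInstance) {
            assertEquals(clientTestInstance.getName(), serverInstance.getName());
        }
    }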

@ -18,7 +18,6 @@
 */
package org.elasticsearch.client;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

@ -42,23 +41,19 @@ import java.io.IOException;
 */
public abstract class AbstractResponseTestCase<S extends ToXContent, C> extends ESTestCase {

    private static final int NUMBER_OF_TEST_RUNS = 20;

    public final void testFromXContent() throws IOException {
        for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
            final S serverTestInstance = createServerTestInstance();
        final S serverTestInstance = createServerTestInstance();

            final XContentType xContentType = randomFrom(XContentType.values());
            final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
        final XContentType xContentType = randomFrom(XContentType.values());
        final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());

            final XContent xContent = XContentFactory.xContent(xContentType);
            final XContentParser parser = xContent.createParser(
                new NamedXContentRegistry(ClusterModule.getNamedXWriteables()),
                LoggingDeprecationHandler.INSTANCE,
                bytes.streamInput());
            final C clientInstance = doParseToClientInstance(parser);
            assertInstances(serverTestInstance, clientInstance);
        }
        final XContent xContent = XContentFactory.xContent(xContentType);
        final XContentParser parser = xContent.createParser(
            NamedXContentRegistry.EMPTY,
            LoggingDeprecationHandler.INSTANCE,
            bytes.streamInput());
        final C clientInstance = doParseToClientInstance(parser);
        assertInstances(serverTestInstance, clientInstance);
    }

    protected abstract S createServerTestInstance();
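
For comparison (again illustrative, not from the diff): AbstractResponseTestCase runs the same round trip in the opposite direction, serializing a random server-side response and parsing it back with the client parser. A minimal sketch under the same caveat, with hypothetical Foo types:

    // Hypothetical sketch of an AbstractResponseTestCase subclass; the Foo types
    // and their fromXContent/getCount methods are assumed for illustration.
    public class FooResponseTests extends AbstractResponseTestCase<FooAction.Response, FooResponse> {

        @Override
        protected FooAction.Response createServerTestInstance() {
            return new FooAction.Response(randomNonNegativeLong()); // random server-side response
        }

        @Override
        protected FooResponse doParseToClientInstance(XContentParser parser) throws IOException {
            return FooResponse.fromXContent(parser); // client-side parsing under test
        }

        @Override
        protected void assertInstances(FooAction.Response serverTestInstance, FooResponse clientInstance) {
            assertEquals(serverTestInstance.getCount(), clientInstance.getCount());
        }
    }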
@ -878,6 +878,18 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {

        waitForJobToClose(jobId);

        long prevJobTimeStamp = System.currentTimeMillis() / 1000;

        // Check that the current timestamp component, in seconds, differs from previously.
        // Note that we used to use an 'awaitBusy(() -> false, 1, TimeUnit.SECONDS);'
        // for the same purpose but the new approach...
        // a) explicitly checks that the timestamps, in seconds, are actually different and
        // b) is slightly more efficient since we may not need to wait an entire second for the timestamp to increment
        assertBusy(() -> {
            long timeNow = System.currentTimeMillis() / 1000;
            assertFalse(prevJobTimeStamp >= timeNow);
        });

        // Update snapshot timestamp to force it out of snapshot retention window
        long oneDayAgo = nowMillis - TimeValue.timeValueHours(24).getMillis() - 1;
        updateModelSnapshotTimestamp(jobId, String.valueOf(oneDayAgo));

@ -903,6 +915,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
        return forecastJobResponse.getForecastId();
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41070")
    public void testDeleteExpiredData() throws Exception {

        String jobId = "test-delete-expired-data";

@ -1418,6 +1431,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
    }

    private void updateModelSnapshotTimestamp(String jobId, String timestamp) throws Exception {

        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();

        GetModelSnapshotsRequest getModelSnapshotsRequest = new GetModelSnapshotsRequest(jobId);

@ -1435,9 +1449,6 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
        UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + jobId, "_doc", documentId);
        updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON);
        highLevelClient().update(updateSnapshotRequest, RequestOptions.DEFAULT);

        // Wait a second to ensure subsequent model snapshots will have a different ID (it depends on epoch seconds)
        awaitBusy(() -> false, 1, TimeUnit.SECONDS);
    }
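
The timestamp comments above boil down to a reusable pattern: rather than sleeping a fixed second, poll until the epoch-second value has actually advanced. A standalone sketch of the idea (plain Java; in the test itself the check is wrapped in ESTestCase.assertBusy, which retries until the assertion passes):

    // Wait until the current epoch second differs from the one observed at entry.
    // Often returns well before a full second has elapsed.
    static void waitForNextEpochSecond() throws InterruptedException {
        final long enteredAt = System.currentTimeMillis() / 1000;
        while (System.currentTimeMillis() / 1000 <= enteredAt) {
            Thread.sleep(10);
        }
    }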
@ -1,13 +1,26 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack;
package org.elasticsearch.client;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.protocol.xpack.XPackInfoResponse;
import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo;
import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo;
import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo;

@ -20,50 +20,110 @@
package org.elasticsearch.client.ccr;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.ccr.AutoFollowStats.AutoFollowedCluster;
import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;
import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction;
import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;

public class CcrStatsResponseTests extends ESTestCase {
public class CcrStatsResponseTests extends AbstractResponseTestCase<CcrStatsAction.Response, CcrStatsResponse> {

    public void testFromXContent() throws IOException {
        xContentTester(this::createParser,
            CcrStatsResponseTests::createTestInstance,
            CcrStatsResponseTests::toXContent,
            CcrStatsResponse::fromXContent)
            .supportsUnknownFields(true)
            .assertEqualsConsumer(CcrStatsResponseTests::assertEqualInstances)
            .assertToXContentEquivalence(false)
            .test();
    @Override
    protected CcrStatsAction.Response createServerTestInstance() {
        org.elasticsearch.xpack.core.ccr.AutoFollowStats autoFollowStats = new org.elasticsearch.xpack.core.ccr.AutoFollowStats(
            randomNonNegativeLong(),
            randomNonNegativeLong(),
            randomNonNegativeLong(),
            randomReadExceptions(),
            randomTrackingClusters()
        );
        FollowStatsAction.StatsResponses statsResponse = createStatsResponse();
        return new CcrStatsAction.Response(autoFollowStats, statsResponse);
    }

    // Needed, because exceptions in IndicesFollowStats and AutoFollowStats cannot be compared
    private static void assertEqualInstances(CcrStatsResponse expectedInstance, CcrStatsResponse newInstance) {
        assertNotSame(expectedInstance, newInstance);
    static NavigableMap<String, Tuple<Long, ElasticsearchException>> randomReadExceptions() {
        final int count = randomIntBetween(0, 16);
        final NavigableMap<String, Tuple<Long, ElasticsearchException>> readExceptions = new TreeMap<>();
        for (int i = 0; i < count; i++) {
            readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(),
                new ElasticsearchException(new IllegalStateException("index [" + i + "]"))));
        }
        return readExceptions;
    }

    static NavigableMap<String, org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster> randomTrackingClusters() {
        final int count = randomIntBetween(0, 16);
        final NavigableMap<String, org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster> readExceptions = new TreeMap<>();
        for (int i = 0; i < count; i++) {
            readExceptions.put("" + i,
                new org.elasticsearch.xpack.core.ccr.AutoFollowStats.AutoFollowedCluster(randomLong(), randomNonNegativeLong()));
        }
        return readExceptions;
    }

    static FollowStatsAction.StatsResponses createStatsResponse() {
        int numResponses = randomIntBetween(0, 8);
        List<FollowStatsAction.StatsResponse> responses = new ArrayList<>(numResponses);
        for (int i = 0; i < numResponses; i++) {
            ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus(
                randomAlphaOfLength(4),
                randomAlphaOfLength(4),
                randomAlphaOfLength(4),
                randomInt(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomIntBetween(0, Integer.MAX_VALUE),
                randomIntBetween(0, Integer.MAX_VALUE),
                randomIntBetween(0, Integer.MAX_VALUE),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                randomNonNegativeLong(),
                Collections.emptyNavigableMap(),
                randomLong(),
                randomBoolean() ? new ElasticsearchException("fatal error") : null);
            responses.add(new FollowStatsAction.StatsResponse(status));
        }
        return new FollowStatsAction.StatsResponses(Collections.emptyList(), Collections.emptyList(), responses);
    }

    @Override
    protected CcrStatsResponse doParseToClientInstance(XContentParser parser) throws IOException {
        return CcrStatsResponse.fromXContent(parser);
    }

    @Override
    protected void assertInstances(CcrStatsAction.Response serverTestInstance, CcrStatsResponse clientInstance) {
        {
            AutoFollowStats newAutoFollowStats = newInstance.getAutoFollowStats();
            AutoFollowStats expectedAutoFollowStats = expectedInstance.getAutoFollowStats();
            AutoFollowStats newAutoFollowStats = clientInstance.getAutoFollowStats();
            org.elasticsearch.xpack.core.ccr.AutoFollowStats expectedAutoFollowStats = serverTestInstance.getAutoFollowStats();
            assertThat(newAutoFollowStats.getNumberOfSuccessfulFollowIndices(),
                equalTo(expectedAutoFollowStats.getNumberOfSuccessfulFollowIndices()));
            assertThat(newAutoFollowStats.getNumberOfFailedRemoteClusterStateRequests(),

@ -89,62 +149,69 @@ public class CcrStatsResponseTests extends ESTestCase {
            }
        }
        {
            IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats();
            IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats();
            IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats();

            // sort by index name, then shard ID
            final Map<String, Map<Integer, FollowStatsAction.StatsResponse>> expectedIndicesFollowStats = new TreeMap<>();
            for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getFollowStats().getStatsResponses()) {
                expectedIndicesFollowStats.computeIfAbsent(
                    statsResponse.status().followerIndex(),
                    k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse);
            }
            assertThat(newIndicesFollowStats.getShardFollowStats().size(),
                equalTo(expectedIndicesFollowStats.getShardFollowStats().size()));
                equalTo(expectedIndicesFollowStats.size()));
            assertThat(newIndicesFollowStats.getShardFollowStats().keySet(),
                equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet()));
                equalTo(expectedIndicesFollowStats.keySet()));
            for (Map.Entry<String, List<ShardFollowStats>> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) {
                List<ShardFollowStats> newStats = indexEntry.getValue();
                List<ShardFollowStats> expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey());
                Map<Integer, FollowStatsAction.StatsResponse> expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey());
                assertThat(newStats.size(), equalTo(expectedStats.size()));
                for (int i = 0; i < newStats.size(); i++) {
                    ShardFollowStats actualShardFollowStats = newStats.get(i);
                    ShardFollowStats expectedShardFollowStats = expectedStats.get(i);
                    ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status();

                    assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster()));
                    assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex()));
                    assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex()));
                    assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex()));
                    assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex()));
                    assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId()));
                    assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(),
                        equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint()));
                    assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo()));
                        equalTo(expectedShardFollowStats.leaderGlobalCheckpoint()));
                    assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo()));
                    assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(),
                        equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint()));
                    assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo()));
                        equalTo(expectedShardFollowStats.followerGlobalCheckpoint()));
                    assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo()));
                    assertThat(actualShardFollowStats.getOutstandingReadRequests(),
                        equalTo(expectedShardFollowStats.getOutstandingReadRequests()));
                        equalTo(expectedShardFollowStats.outstandingReadRequests()));
                    assertThat(actualShardFollowStats.getOutstandingWriteRequests(),
                        equalTo(expectedShardFollowStats.getOutstandingWriteRequests()));
                        equalTo(expectedShardFollowStats.outstandingWriteRequests()));
                    assertThat(actualShardFollowStats.getWriteBufferOperationCount(),
                        equalTo(expectedShardFollowStats.getWriteBufferOperationCount()));
                        equalTo(expectedShardFollowStats.writeBufferOperationCount()));
                    assertThat(actualShardFollowStats.getFollowerMappingVersion(),
                        equalTo(expectedShardFollowStats.getFollowerMappingVersion()));
                        equalTo(expectedShardFollowStats.followerMappingVersion()));
                    assertThat(actualShardFollowStats.getFollowerSettingsVersion(),
                        equalTo(expectedShardFollowStats.getFollowerSettingsVersion()));
                        equalTo(expectedShardFollowStats.followerSettingsVersion()));
                    assertThat(actualShardFollowStats.getTotalReadTimeMillis(),
                        equalTo(expectedShardFollowStats.getTotalReadTimeMillis()));
                        equalTo(expectedShardFollowStats.totalReadTimeMillis()));
                    assertThat(actualShardFollowStats.getSuccessfulReadRequests(),
                        equalTo(expectedShardFollowStats.getSuccessfulReadRequests()));
                    assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests()));
                    assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads()));
                    assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead()));
                        equalTo(expectedShardFollowStats.successfulReadRequests()));
                    assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests()));
                    assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads()));
                    assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead()));
                    assertThat(actualShardFollowStats.getTotalWriteTimeMillis(),
                        equalTo(expectedShardFollowStats.getTotalWriteTimeMillis()));
                        equalTo(expectedShardFollowStats.totalWriteTimeMillis()));
                    assertThat(actualShardFollowStats.getSuccessfulWriteRequests(),
                        equalTo(expectedShardFollowStats.getSuccessfulWriteRequests()));
                        equalTo(expectedShardFollowStats.successfulWriteRequests()));
                    assertThat(actualShardFollowStats.getFailedWriteRequests(),
                        equalTo(expectedShardFollowStats.getFailedWriteRequests()));
                    assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten()));
                        equalTo(expectedShardFollowStats.failedWriteRequests()));
                    assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten()));
                    assertThat(actualShardFollowStats.getReadExceptions().size(),
                        equalTo(expectedShardFollowStats.getReadExceptions().size()));
                        equalTo(expectedShardFollowStats.readExceptions().size()));
                    assertThat(actualShardFollowStats.getReadExceptions().keySet(),
                        equalTo(expectedShardFollowStats.getReadExceptions().keySet()));
                        equalTo(expectedShardFollowStats.readExceptions().keySet()));
                    for (final Map.Entry<Long, Tuple<Integer, ElasticsearchException>> entry :
                        actualShardFollowStats.getReadExceptions().entrySet()) {
                        final Tuple<Integer, ElasticsearchException> expectedTuple =
                            expectedShardFollowStats.getReadExceptions().get(entry.getKey());
                            expectedShardFollowStats.readExceptions().get(entry.getKey());
                        assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1()));
                        // x-content loses the exception
                        final ElasticsearchException expected = expectedTuple.v2();

@ -156,246 +223,10 @@ public class CcrStatsResponseTests extends ESTestCase {
                        assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage()));
                    }
                    assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(),
                        equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis()));
                        equalTo(expectedShardFollowStats.timeSinceLastReadMillis()));
                }
            }
        }
    }

    private static void toXContent(CcrStatsResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        {
            AutoFollowStats autoFollowStats = response.getAutoFollowStats();
            builder.startObject(CcrStatsResponse.AUTO_FOLLOW_STATS_FIELD.getPreferredName());
            {
                builder.field(AutoFollowStats.NUMBER_OF_SUCCESSFUL_INDICES_AUTO_FOLLOWED.getPreferredName(),
                    autoFollowStats.getNumberOfSuccessfulFollowIndices());
                builder.field(AutoFollowStats.NUMBER_OF_FAILED_REMOTE_CLUSTER_STATE_REQUESTS.getPreferredName(),
                    autoFollowStats.getNumberOfFailedRemoteClusterStateRequests());
                builder.field(AutoFollowStats.NUMBER_OF_FAILED_INDICES_AUTO_FOLLOWED.getPreferredName(),
                    autoFollowStats.getNumberOfFailedFollowIndices());
                builder.startArray(AutoFollowStats.RECENT_AUTO_FOLLOW_ERRORS.getPreferredName());
                for (Map.Entry<String, Tuple<Long, ElasticsearchException>> entry :
                    autoFollowStats.getRecentAutoFollowErrors().entrySet()) {
                    builder.startObject();
                    {
                        builder.field(AutoFollowStats.LEADER_INDEX.getPreferredName(), entry.getKey());
                        builder.field(AutoFollowStats.TIMESTAMP.getPreferredName(), entry.getValue().v1());
                        builder.field(AutoFollowStats.AUTO_FOLLOW_EXCEPTION.getPreferredName());
                        builder.startObject();
                        {
                            ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, entry.getValue().v2());
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endArray();
                builder.startArray(AutoFollowStats.AUTO_FOLLOWED_CLUSTERS.getPreferredName());
                for (Map.Entry<String, AutoFollowedCluster> entry : autoFollowStats.getAutoFollowedClusters().entrySet()) {
                    builder.startObject();
                    {
                        builder.field(AutoFollowStats.CLUSTER_NAME.getPreferredName(), entry.getKey());
                        builder.field(AutoFollowStats.TIME_SINCE_LAST_CHECK_MILLIS.getPreferredName(),
                            entry.getValue().getTimeSinceLastCheckMillis());
                        builder.field(AutoFollowStats.LAST_SEEN_METADATA_VERSION.getPreferredName(),
                            entry.getValue().getLastSeenMetadataVersion());
                    }
                    builder.endObject();
                }
                builder.endArray();
            }
            builder.endObject();

            IndicesFollowStats indicesFollowStats = response.getIndicesFollowStats();
            builder.startObject(CcrStatsResponse.FOLLOW_STATS_FIELD.getPreferredName());
            {
                builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName());
                for (Map.Entry<String, List<ShardFollowStats>> indexEntry :
                    indicesFollowStats.getShardFollowStats().entrySet()) {
                    builder.startObject();
                    {
                        builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey());
                        builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName());
                        {
                            for (ShardFollowStats stats : indexEntry.getValue()) {
                                builder.startObject();
                                {
                                    builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster());
                                    builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex());
                                    builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex());
                                    builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId());
                                    builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(),
                                        stats.getLeaderGlobalCheckpoint());
                                    builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo());
                                    builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(),
                                        stats.getFollowerGlobalCheckpoint());
                                    builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(),
                                        stats.getFollowerMaxSeqNo());
                                    builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(),
                                        stats.getLastRequestedSeqNo());
                                    builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(),
                                        stats.getOutstandingReadRequests());
                                    builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(),
                                        stats.getOutstandingWriteRequests());
                                    builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(),
                                        stats.getWriteBufferOperationCount());
                                    builder.humanReadableField(
                                        ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(),
                                        "write_buffer_size",
                                        new ByteSizeValue(stats.getWriteBufferSizeInBytes()));
                                    builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(),
                                        stats.getFollowerMappingVersion());
                                    builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(),
                                        stats.getFollowerSettingsVersion());
                                    builder.humanReadableField(
                                        ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(),
                                        "total_read_time",
                                        new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS));
                                    builder.humanReadableField(
                                        ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(),
                                        "total_read_remote_exec_time",
                                        new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS));
                                    builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(),
                                        stats.getSuccessfulReadRequests());
                                    builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(),
                                        stats.getFailedReadRequests());
                                    builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads());
                                    builder.humanReadableField(
                                        ShardFollowStats.BYTES_READ.getPreferredName(),
                                        "total_read",
                                        new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES));
                                    builder.humanReadableField(
                                        ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(),
                                        "total_write_time",
                                        new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS));
                                    builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(),
                                        stats.getSuccessfulWriteRequests());
                                    builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(),
                                        stats.getFailedWriteRequests());
                                    builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten());
                                    builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName());
                                    {
                                        for (final Map.Entry<Long, Tuple<Integer, ElasticsearchException>> entry :
                                            stats.getReadExceptions().entrySet()) {
                                            builder.startObject();
                                            {
                                                builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(),
                                                    entry.getKey());
                                                builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(),
                                                    entry.getValue().v1());
                                                builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName());
                                                builder.startObject();
                                                {
                                                    ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS,
                                                        entry.getValue().v2());
                                                }
                                                builder.endObject();
                                            }
                                            builder.endObject();
                                        }
                                    }
                                    builder.endArray();
                                    builder.humanReadableField(
                                        ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(),
                                        "time_since_last_read",
                                        new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS));
                                    if (stats.getFatalException() != null) {
                                        builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName());
                                        builder.startObject();
                                        {
                                            ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS,
                                                stats.getFatalException());
                                        }
                                        builder.endObject();
                                    }
                                }
                                builder.endObject();
                            }
                        }
                        builder.endArray();
                    }
                    builder.endObject();
                }
                builder.endArray();
            }
            builder.endObject();
        }
        builder.endObject();
    }

    private static CcrStatsResponse createTestInstance() {
        return new CcrStatsResponse(randomAutoFollowStats(), randomIndicesFollowStats());
    }

    private static AutoFollowStats randomAutoFollowStats() {
        final int count = randomIntBetween(0, 16);
        final NavigableMap<String, Tuple<Long, ElasticsearchException>> readExceptions = new TreeMap<>();
        for (int i = 0; i < count; i++) {
            readExceptions.put("" + i, Tuple.tuple(randomNonNegativeLong(),
                new ElasticsearchException(new IllegalStateException("index [" + i + "]"))));
        }
        final NavigableMap<String, AutoFollowedCluster> autoFollowClusters = new TreeMap<>();
        for (int i = 0; i < count; i++) {
            autoFollowClusters.put("" + i, new AutoFollowedCluster(randomLong(), randomNonNegativeLong()));
        }
        return new AutoFollowStats(
            randomNonNegativeLong(),
            randomNonNegativeLong(),
            randomNonNegativeLong(),
            readExceptions,
            autoFollowClusters
        );
    }

    static IndicesFollowStats randomIndicesFollowStats() {
        int numIndices = randomIntBetween(0, 16);
        NavigableMap<String, List<ShardFollowStats>> shardFollowStats = new TreeMap<>();
        for (int i = 0; i < numIndices; i++) {
            String index = randomAlphaOfLength(4);
            int numShards = randomIntBetween(0, 5);
            List<ShardFollowStats> stats = new ArrayList<>(numShards);
            shardFollowStats.put(index, stats);
            for (int j = 0; j < numShards; j++) {
                final int count = randomIntBetween(0, 16);
                final NavigableMap<Long, Tuple<Integer, ElasticsearchException>> readExceptions = new TreeMap<>();
                for (long k = 0; k < count; k++) {
                    readExceptions.put(k, new Tuple<>(randomIntBetween(0, Integer.MAX_VALUE),
                        new ElasticsearchException(new IllegalStateException("index [" + k + "]"))));
                }

                stats.add(new ShardFollowStats(
                    randomAlphaOfLength(4),
                    randomAlphaOfLength(4),
                    randomAlphaOfLength(4),
                    randomInt(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomIntBetween(0, Integer.MAX_VALUE),
                    randomIntBetween(0, Integer.MAX_VALUE),
                    randomIntBetween(0, Integer.MAX_VALUE),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomNonNegativeLong(),
                    randomLong(),
                    readExceptions,
                    randomBoolean() ? new ElasticsearchException("fatal error") : null));
            }
        }
        return new IndicesFollowStats(shardFollowStats);
    }

}
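
A side note on the assertInstances implementation above: the server response is a flat list of per-shard stats, while the client view is grouped per index, so the test first rebuilds the grouping with nested TreeMaps (sorted by index name, then shard ID) via computeIfAbsent. The idiom in isolation, with plain String/Integer payloads standing in for the CCR types:

    import java.util.Map;
    import java.util.TreeMap;

    class GroupingSketch {
        // Group rows of {indexName, shardId, payload} into a map sorted by index
        // name whose values are maps sorted by shard ID.
        static Map<String, Map<Integer, String>> groupByIndexAndShard(Iterable<String[]> rows) {
            final Map<String, Map<Integer, String>> grouped = new TreeMap<>();
            for (final String[] row : rows) {
                grouped.computeIfAbsent(row[0], k -> new TreeMap<>())
                    .put(Integer.valueOf(row[1]), row[2]);
            }
            return grouped;
        }
    }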

@ -19,59 +19,89 @@

package org.elasticsearch.client.ccr;

import org.elasticsearch.client.ccr.FollowInfoResponse.FollowerInfo;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction;
import org.elasticsearch.xpack.core.ccr.action.FollowParameters;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

public class FollowInfoResponseTests extends ESTestCase {
public class FollowInfoResponseTests extends AbstractResponseTestCase<FollowInfoAction.Response, FollowInfoResponse> {

    public void testFromXContent() throws IOException {
        xContentTester(this::createParser,
            FollowInfoResponseTests::createTestInstance,
            FollowInfoResponseTests::toXContent,
            FollowInfoResponse::fromXContent)
            .supportsUnknownFields(true)
            .test();
    }

    private static void toXContent(FollowInfoResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        builder.startArray(FollowInfoResponse.FOLLOWER_INDICES_FIELD.getPreferredName());
        for (FollowerInfo info : response.getInfos()) {
            builder.startObject();
            builder.field(FollowerInfo.FOLLOWER_INDEX_FIELD.getPreferredName(), info.getFollowerIndex());
            builder.field(FollowerInfo.REMOTE_CLUSTER_FIELD.getPreferredName(), info.getRemoteCluster());
            builder.field(FollowerInfo.LEADER_INDEX_FIELD.getPreferredName(), info.getLeaderIndex());
            builder.field(FollowerInfo.STATUS_FIELD.getPreferredName(), info.getStatus().getName());
            if (info.getParameters() != null) {
                builder.startObject(FollowerInfo.PARAMETERS_FIELD.getPreferredName());
                {
                    info.getParameters().toXContentFragment(builder, ToXContent.EMPTY_PARAMS);
                }
                builder.endObject();
            }
            builder.endObject();
        }
        builder.endArray();
        builder.endObject();
    }

    private static FollowInfoResponse createTestInstance() {
        int numInfos = randomIntBetween(0, 64);
        List<FollowerInfo> infos = new ArrayList<>(numInfos);
    @Override
    protected FollowInfoAction.Response createServerTestInstance() {
        int numInfos = randomIntBetween(0, 32);
        List<FollowInfoAction.Response.FollowerInfo> infos = new ArrayList<>(numInfos);
        for (int i = 0; i < numInfos; i++) {
            FollowInfoResponse.Status status = randomFrom(FollowInfoResponse.Status.values());
            FollowConfig followConfig = randomBoolean() ? FollowConfigTests.createTestInstance() : null;
            infos.add(new FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), status, followConfig));
            FollowParameters followParameters = null;
            if (randomBoolean()) {
                followParameters = randomFollowParameters();
            }

            infos.add(new FollowInfoAction.Response.FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4),
                randomFrom(FollowInfoAction.Response.Status.values()), followParameters));
        }
        return new FollowInfoAction.Response(infos);
    }

    static FollowParameters randomFollowParameters() {
        FollowParameters followParameters = new FollowParameters();
        followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));
        followParameters.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE));
        followParameters.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
        followParameters.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
        followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong()));
        followParameters.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong()));
        followParameters.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE));
        followParameters.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong()));
        followParameters.setMaxRetryDelay(new TimeValue(randomNonNegativeLong()));
        followParameters.setReadPollTimeout(new TimeValue(randomNonNegativeLong()));
        return followParameters;
    }

    @Override
    protected FollowInfoResponse doParseToClientInstance(XContentParser parser) throws IOException {
        return FollowInfoResponse.fromXContent(parser);
    }

    @Override
    protected void assertInstances(FollowInfoAction.Response serverTestInstance, FollowInfoResponse clientInstance) {
        assertThat(serverTestInstance.getFollowInfos().size(), equalTo(clientInstance.getInfos().size()));
        for (int i = 0; i < serverTestInstance.getFollowInfos().size(); i++) {
            FollowInfoAction.Response.FollowerInfo serverFollowInfo = serverTestInstance.getFollowInfos().get(i);
            FollowInfoResponse.FollowerInfo clientFollowerInfo = clientInstance.getInfos().get(i);

            assertThat(serverFollowInfo.getRemoteCluster(), equalTo(clientFollowerInfo.getRemoteCluster()));
            assertThat(serverFollowInfo.getLeaderIndex(), equalTo(clientFollowerInfo.getLeaderIndex()));
            assertThat(serverFollowInfo.getFollowerIndex(), equalTo(clientFollowerInfo.getFollowerIndex()));
            assertThat(serverFollowInfo.getStatus().toString().toLowerCase(Locale.ROOT),
                equalTo(clientFollowerInfo.getStatus().getName().toLowerCase(Locale.ROOT)));

            FollowParameters serverParams = serverFollowInfo.getParameters();
            FollowConfig clientParams = clientFollowerInfo.getParameters();
            if (serverParams != null) {
                assertThat(serverParams.getMaxReadRequestOperationCount(), equalTo(clientParams.getMaxReadRequestOperationCount()));
                assertThat(serverParams.getMaxWriteRequestOperationCount(), equalTo(clientParams.getMaxWriteRequestOperationCount()));
                assertThat(serverParams.getMaxOutstandingReadRequests(), equalTo(clientParams.getMaxOutstandingReadRequests()));
                assertThat(serverParams.getMaxOutstandingWriteRequests(), equalTo(clientParams.getMaxOutstandingWriteRequests()));
                assertThat(serverParams.getMaxReadRequestSize(), equalTo(clientParams.getMaxReadRequestSize()));
                assertThat(serverParams.getMaxWriteRequestSize(), equalTo(clientParams.getMaxWriteRequestSize()));
                assertThat(serverParams.getMaxWriteBufferCount(), equalTo(clientParams.getMaxWriteBufferCount()));
                assertThat(serverParams.getMaxWriteBufferSize(), equalTo(clientParams.getMaxWriteBufferSize()));
                assertThat(serverParams.getMaxRetryDelay(), equalTo(clientParams.getMaxRetryDelay()));
                assertThat(serverParams.getReadPollTimeout(), equalTo(clientParams.getReadPollTimeout()));
            } else {
                assertThat(clientParams, nullValue());
            }
        }
        return new FollowInfoResponse(infos);
    }

}
@ -20,234 +20,115 @@
|
|||
package org.elasticsearch.client.ccr;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.client.AbstractResponseTestCase;
|
||||
import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;
|
||||
import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import static org.elasticsearch.client.ccr.CcrStatsResponseTests.randomIndicesFollowStats;
|
||||
import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
|
||||
import static org.elasticsearch.client.ccr.CcrStatsResponseTests.createStatsResponse;
|
||||
import static org.hamcrest.Matchers.anyOf;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
|
||||
public class FollowStatsResponseTests extends ESTestCase {
|
||||
public class FollowStatsResponseTests extends AbstractResponseTestCase<FollowStatsAction.StatsResponses, FollowStatsResponse> {
|
||||
|
||||
public void testFromXContent() throws IOException {
|
||||
xContentTester(this::createParser,
|
||||
FollowStatsResponseTests::createTestInstance,
|
||||
FollowStatsResponseTests::toXContent,
|
||||
FollowStatsResponse::fromXContent)
|
||||
.supportsUnknownFields(true)
|
||||
.assertEqualsConsumer(FollowStatsResponseTests::assertEqualInstances)
|
||||
.assertToXContentEquivalence(false)
|
||||
.test();
|
||||
@Override
|
||||
protected FollowStatsAction.StatsResponses createServerTestInstance() {
|
||||
return createStatsResponse();
|
||||
}
|
||||
|
||||
// Needed, because exceptions in IndicesFollowStats cannot be compared
|
||||
private static void assertEqualInstances(FollowStatsResponse expectedInstance, FollowStatsResponse newInstance) {
|
||||
assertNotSame(expectedInstance, newInstance);
|
||||
{
|
||||
IndicesFollowStats newIndicesFollowStats = newInstance.getIndicesFollowStats();
|
||||
IndicesFollowStats expectedIndicesFollowStats = expectedInstance.getIndicesFollowStats();
|
||||
assertThat(newIndicesFollowStats.getShardFollowStats().size(),
|
||||
equalTo(expectedIndicesFollowStats.getShardFollowStats().size()));
|
||||
assertThat(newIndicesFollowStats.getShardFollowStats().keySet(),
|
||||
equalTo(expectedIndicesFollowStats.getShardFollowStats().keySet()));
|
||||
for (Map.Entry<String, List<ShardFollowStats>> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) {
|
||||
List<ShardFollowStats> newStats = indexEntry.getValue();
|
||||
List<ShardFollowStats> expectedStats = expectedIndicesFollowStats.getShardFollowStats(indexEntry.getKey());
|
||||
assertThat(newStats.size(), equalTo(expectedStats.size()));
|
||||
for (int i = 0; i < newStats.size(); i++) {
|
||||
ShardFollowStats actualShardFollowStats = newStats.get(i);
|
||||
ShardFollowStats expectedShardFollowStats = expectedStats.get(i);
|
||||
@Override
|
||||
protected FollowStatsResponse doParseToClientInstance(XContentParser parser) throws IOException {
|
||||
return FollowStatsResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster()));
|
||||
assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.getLeaderIndex()));
|
||||
assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.getFollowerIndex()));
|
||||
assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId()));
|
||||
assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(),
|
||||
equalTo(expectedShardFollowStats.getLeaderGlobalCheckpoint()));
|
||||
assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.getLeaderMaxSeqNo()));
|
||||
assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(),
|
||||
equalTo(expectedShardFollowStats.getFollowerGlobalCheckpoint()));
|
||||
assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.getLastRequestedSeqNo()));
|
||||
assertThat(actualShardFollowStats.getOutstandingReadRequests(),
|
||||
equalTo(expectedShardFollowStats.getOutstandingReadRequests()));
|
||||
assertThat(actualShardFollowStats.getOutstandingWriteRequests(),
|
||||
equalTo(expectedShardFollowStats.getOutstandingWriteRequests()));
|
||||
assertThat(actualShardFollowStats.getWriteBufferOperationCount(),
|
||||
equalTo(expectedShardFollowStats.getWriteBufferOperationCount()));
|
||||
assertThat(actualShardFollowStats.getFollowerMappingVersion(),
|
||||
equalTo(expectedShardFollowStats.getFollowerMappingVersion()));
|
||||
assertThat(actualShardFollowStats.getFollowerSettingsVersion(),
|
||||
equalTo(expectedShardFollowStats.getFollowerSettingsVersion()));
|
||||
assertThat(actualShardFollowStats.getTotalReadTimeMillis(),
|
||||
equalTo(expectedShardFollowStats.getTotalReadTimeMillis()));
|
||||
assertThat(actualShardFollowStats.getSuccessfulReadRequests(),
|
||||
equalTo(expectedShardFollowStats.getSuccessfulReadRequests()));
|
||||
assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.getFailedReadRequests()));
|
||||
assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.getOperationsReads()));
|
||||
assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.getBytesRead()));
|
||||
assertThat(actualShardFollowStats.getTotalWriteTimeMillis(),
|
||||
equalTo(expectedShardFollowStats.getTotalWriteTimeMillis()));
|
||||
assertThat(actualShardFollowStats.getSuccessfulWriteRequests(),
|
||||
equalTo(expectedShardFollowStats.getSuccessfulWriteRequests()));
|
||||
assertThat(actualShardFollowStats.getFailedWriteRequests(),
|
||||
equalTo(expectedShardFollowStats.getFailedWriteRequests()));
|
||||
assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.getOperationWritten()));
|
||||
assertThat(actualShardFollowStats.getReadExceptions().size(),
|
||||
equalTo(expectedShardFollowStats.getReadExceptions().size()));
|
||||
assertThat(actualShardFollowStats.getReadExceptions().keySet(),
|
||||
equalTo(expectedShardFollowStats.getReadExceptions().keySet()));
|
||||
for (final Map.Entry<Long, Tuple<Integer, ElasticsearchException>> entry :
|
||||
actualShardFollowStats.getReadExceptions().entrySet()) {
|
||||
final Tuple<Integer, ElasticsearchException> expectedTuple =
|
||||
expectedShardFollowStats.getReadExceptions().get(entry.getKey());
|
||||
assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1()));
|
||||
// x-content loses the exception
|
||||
final ElasticsearchException expected = expectedTuple.v2();
|
||||
assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage()));
|
||||
assertNotNull(entry.getValue().v2().getCause());
|
||||
assertThat(
|
||||
entry.getValue().v2().getCause(),
|
||||
anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class)));
|
||||
assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage()));
|
||||
}
|
||||
assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(),
|
||||
equalTo(expectedShardFollowStats.getTimeSinceLastReadMillis()));
|
||||
@Override
|
||||
protected void assertInstances(FollowStatsAction.StatsResponses serverTestInstance, FollowStatsResponse clientInstance) {
|
||||
IndicesFollowStats newIndicesFollowStats = clientInstance.getIndicesFollowStats();
|
||||
|
||||
// sort by index name, then shard ID
|
||||
final Map<String, Map<Integer, FollowStatsAction.StatsResponse>> expectedIndicesFollowStats = new TreeMap<>();
|
||||
for (final FollowStatsAction.StatsResponse statsResponse : serverTestInstance.getStatsResponses()) {
|
||||
expectedIndicesFollowStats.computeIfAbsent(
|
||||
statsResponse.status().followerIndex(),
|
||||
k -> new TreeMap<>()).put(statsResponse.status().getShardId(), statsResponse);
|
||||
}
|
||||
assertThat(newIndicesFollowStats.getShardFollowStats().size(),
|
||||
equalTo(expectedIndicesFollowStats.size()));
|
||||
assertThat(newIndicesFollowStats.getShardFollowStats().keySet(),
|
||||
equalTo(expectedIndicesFollowStats.keySet()));
|
||||
for (Map.Entry<String, List<ShardFollowStats>> indexEntry : newIndicesFollowStats.getShardFollowStats().entrySet()) {
|
||||
List<ShardFollowStats> newStats = indexEntry.getValue();
|
||||
Map<Integer, FollowStatsAction.StatsResponse> expectedStats = expectedIndicesFollowStats.get(indexEntry.getKey());
|
||||
assertThat(newStats.size(), equalTo(expectedStats.size()));
|
||||
for (int i = 0; i < newStats.size(); i++) {
|
||||
ShardFollowStats actualShardFollowStats = newStats.get(i);
|
||||
ShardFollowNodeTaskStatus expectedShardFollowStats = expectedStats.get(actualShardFollowStats.getShardId()).status();
|
||||
|
||||
assertThat(actualShardFollowStats.getRemoteCluster(), equalTo(expectedShardFollowStats.getRemoteCluster()));
|
||||
assertThat(actualShardFollowStats.getLeaderIndex(), equalTo(expectedShardFollowStats.leaderIndex()));
|
||||
assertThat(actualShardFollowStats.getFollowerIndex(), equalTo(expectedShardFollowStats.followerIndex()));
|
||||
assertThat(actualShardFollowStats.getShardId(), equalTo(expectedShardFollowStats.getShardId()));
|
||||
assertThat(actualShardFollowStats.getLeaderGlobalCheckpoint(),
|
||||
equalTo(expectedShardFollowStats.leaderGlobalCheckpoint()));
|
||||
assertThat(actualShardFollowStats.getLeaderMaxSeqNo(), equalTo(expectedShardFollowStats.leaderMaxSeqNo()));
|
||||
assertThat(actualShardFollowStats.getFollowerGlobalCheckpoint(),
|
||||
equalTo(expectedShardFollowStats.followerGlobalCheckpoint()));
|
||||
assertThat(actualShardFollowStats.getLastRequestedSeqNo(), equalTo(expectedShardFollowStats.lastRequestedSeqNo()));
|
||||
assertThat(actualShardFollowStats.getOutstandingReadRequests(),
|
||||
equalTo(expectedShardFollowStats.outstandingReadRequests()));
|
||||
assertThat(actualShardFollowStats.getOutstandingWriteRequests(),
|
||||
equalTo(expectedShardFollowStats.outstandingWriteRequests()));
|
||||
assertThat(actualShardFollowStats.getWriteBufferOperationCount(),
|
||||
equalTo(expectedShardFollowStats.writeBufferOperationCount()));
|
||||
assertThat(actualShardFollowStats.getFollowerMappingVersion(),
|
||||
equalTo(expectedShardFollowStats.followerMappingVersion()));
|
||||
assertThat(actualShardFollowStats.getFollowerSettingsVersion(),
|
||||
equalTo(expectedShardFollowStats.followerSettingsVersion()));
|
||||
assertThat(actualShardFollowStats.getTotalReadTimeMillis(),
|
||||
equalTo(expectedShardFollowStats.totalReadTimeMillis()));
|
||||
assertThat(actualShardFollowStats.getSuccessfulReadRequests(),
|
||||
equalTo(expectedShardFollowStats.successfulReadRequests()));
|
||||
assertThat(actualShardFollowStats.getFailedReadRequests(), equalTo(expectedShardFollowStats.failedReadRequests()));
|
||||
assertThat(actualShardFollowStats.getOperationsReads(), equalTo(expectedShardFollowStats.operationsReads()));
|
||||
assertThat(actualShardFollowStats.getBytesRead(), equalTo(expectedShardFollowStats.bytesRead()));
|
||||
assertThat(actualShardFollowStats.getTotalWriteTimeMillis(),
|
||||
equalTo(expectedShardFollowStats.totalWriteTimeMillis()));
|
||||
assertThat(actualShardFollowStats.getSuccessfulWriteRequests(),
|
||||
equalTo(expectedShardFollowStats.successfulWriteRequests()));
|
||||
assertThat(actualShardFollowStats.getFailedWriteRequests(),
|
||||
equalTo(expectedShardFollowStats.failedWriteRequests()));
|
||||
assertThat(actualShardFollowStats.getOperationWritten(), equalTo(expectedShardFollowStats.operationWritten()));
|
||||
assertThat(actualShardFollowStats.getReadExceptions().size(),
|
||||
equalTo(expectedShardFollowStats.readExceptions().size()));
|
||||
assertThat(actualShardFollowStats.getReadExceptions().keySet(),
|
||||
equalTo(expectedShardFollowStats.readExceptions().keySet()));
|
||||
for (final Map.Entry<Long, Tuple<Integer, ElasticsearchException>> entry :
|
||||
actualShardFollowStats.getReadExceptions().entrySet()) {
|
||||
final Tuple<Integer, ElasticsearchException> expectedTuple =
|
||||
expectedShardFollowStats.readExceptions().get(entry.getKey());
|
||||
assertThat(entry.getValue().v1(), equalTo(expectedTuple.v1()));
|
||||
// x-content loses the exception
|
||||
final ElasticsearchException expected = expectedTuple.v2();
|
||||
assertThat(entry.getValue().v2().getMessage(), containsString(expected.getMessage()));
|
||||
assertNotNull(entry.getValue().v2().getCause());
|
||||
assertThat(
|
||||
entry.getValue().v2().getCause(),
|
||||
anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class)));
|
||||
assertThat(entry.getValue().v2().getCause().getMessage(), containsString(expected.getCause().getMessage()));
|
||||
}
|
||||
assertThat(actualShardFollowStats.getTimeSinceLastReadMillis(),
|
||||
equalTo(expectedShardFollowStats.timeSinceLastReadMillis()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    private static void toXContent(FollowStatsResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        {
            builder.startArray(IndicesFollowStats.INDICES_FIELD.getPreferredName());
            for (Map.Entry<String, List<ShardFollowStats>> indexEntry :
                    response.getIndicesFollowStats().getShardFollowStats().entrySet()) {
                builder.startObject();
                {
                    builder.field(IndicesFollowStats.INDEX_FIELD.getPreferredName(), indexEntry.getKey());
                    builder.startArray(IndicesFollowStats.SHARDS_FIELD.getPreferredName());
                    {
                        for (ShardFollowStats stats : indexEntry.getValue()) {
                            builder.startObject();
                            {
                                builder.field(ShardFollowStats.LEADER_CLUSTER.getPreferredName(), stats.getRemoteCluster());
                                builder.field(ShardFollowStats.LEADER_INDEX.getPreferredName(), stats.getLeaderIndex());
                                builder.field(ShardFollowStats.FOLLOWER_INDEX.getPreferredName(), stats.getFollowerIndex());
                                builder.field(ShardFollowStats.SHARD_ID.getPreferredName(), stats.getShardId());
                                builder.field(ShardFollowStats.LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(),
                                    stats.getLeaderGlobalCheckpoint());
                                builder.field(ShardFollowStats.LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), stats.getLeaderMaxSeqNo());
                                builder.field(ShardFollowStats.FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(),
                                    stats.getFollowerGlobalCheckpoint());
                                builder.field(ShardFollowStats.FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(),
                                    stats.getFollowerMaxSeqNo());
                                builder.field(ShardFollowStats.LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(),
                                    stats.getLastRequestedSeqNo());
                                builder.field(ShardFollowStats.OUTSTANDING_READ_REQUESTS.getPreferredName(),
                                    stats.getOutstandingReadRequests());
                                builder.field(ShardFollowStats.OUTSTANDING_WRITE_REQUESTS.getPreferredName(),
                                    stats.getOutstandingWriteRequests());
                                builder.field(ShardFollowStats.WRITE_BUFFER_OPERATION_COUNT_FIELD.getPreferredName(),
                                    stats.getWriteBufferOperationCount());
                                builder.humanReadableField(
                                    ShardFollowStats.WRITE_BUFFER_SIZE_IN_BYTES_FIELD.getPreferredName(),
                                    "write_buffer_size",
                                    new ByteSizeValue(stats.getWriteBufferSizeInBytes()));
                                builder.field(ShardFollowStats.FOLLOWER_MAPPING_VERSION_FIELD.getPreferredName(),
                                    stats.getFollowerMappingVersion());
                                builder.field(ShardFollowStats.FOLLOWER_SETTINGS_VERSION_FIELD.getPreferredName(),
                                    stats.getFollowerSettingsVersion());
                                builder.humanReadableField(
                                    ShardFollowStats.TOTAL_READ_TIME_MILLIS_FIELD.getPreferredName(),
                                    "total_read_time",
                                    new TimeValue(stats.getTotalReadTimeMillis(), TimeUnit.MILLISECONDS));
                                builder.humanReadableField(
                                    ShardFollowStats.TOTAL_READ_REMOTE_EXEC_TIME_MILLIS_FIELD.getPreferredName(),
                                    "total_read_remote_exec_time",
                                    new TimeValue(stats.getTotalReadRemoteExecTimeMillis(), TimeUnit.MILLISECONDS));
                                builder.field(ShardFollowStats.SUCCESSFUL_READ_REQUESTS_FIELD.getPreferredName(),
                                    stats.getSuccessfulReadRequests());
                                builder.field(ShardFollowStats.FAILED_READ_REQUESTS_FIELD.getPreferredName(),
                                    stats.getFailedReadRequests());
                                builder.field(ShardFollowStats.OPERATIONS_READ_FIELD.getPreferredName(), stats.getOperationsReads());
                                builder.humanReadableField(
                                    ShardFollowStats.BYTES_READ.getPreferredName(),
                                    "total_read",
                                    new ByteSizeValue(stats.getBytesRead(), ByteSizeUnit.BYTES));
                                builder.humanReadableField(
                                    ShardFollowStats.TOTAL_WRITE_TIME_MILLIS_FIELD.getPreferredName(),
                                    "total_write_time",
                                    new TimeValue(stats.getTotalWriteTimeMillis(), TimeUnit.MILLISECONDS));
                                builder.field(ShardFollowStats.SUCCESSFUL_WRITE_REQUESTS_FIELD.getPreferredName(),
                                    stats.getSuccessfulWriteRequests());
                                builder.field(ShardFollowStats.FAILED_WRITE_REQUEST_FIELD.getPreferredName(),
                                    stats.getFailedWriteRequests());
                                builder.field(ShardFollowStats.OPERATIONS_WRITTEN.getPreferredName(), stats.getOperationWritten());
                                builder.startArray(ShardFollowStats.READ_EXCEPTIONS.getPreferredName());
                                {
                                    for (final Map.Entry<Long, Tuple<Integer, ElasticsearchException>> entry :
                                            stats.getReadExceptions().entrySet()) {
                                        builder.startObject();
                                        {
                                            builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(),
                                                entry.getKey());
                                            builder.field(ShardFollowStats.READ_EXCEPTIONS_RETRIES.getPreferredName(),
                                                entry.getValue().v1());
                                            builder.field(ShardFollowStats.READ_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName());
                                            builder.startObject();
                                            {
                                                ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS,
                                                    entry.getValue().v2());
                                            }
                                            builder.endObject();
                                        }
                                        builder.endObject();
                                    }
                                }
                                builder.endArray();
                                builder.humanReadableField(
                                    ShardFollowStats.TIME_SINCE_LAST_READ_MILLIS_FIELD.getPreferredName(),
                                    "time_since_last_read",
                                    new TimeValue(stats.getTimeSinceLastReadMillis(), TimeUnit.MILLISECONDS));
                                if (stats.getFatalException() != null) {
                                    builder.field(ShardFollowStats.FATAL_EXCEPTION.getPreferredName());
                                    builder.startObject();
                                    {
                                        ElasticsearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS,
                                            stats.getFatalException());
                                    }
                                    builder.endObject();
                                }
                            }
                            builder.endObject();
                        }
                    }
                    builder.endArray();
                }
                builder.endObject();
            }
            builder.endArray();
        }
        builder.endObject();
    }

    private static FollowStatsResponse createTestInstance() {
        return new FollowStatsResponse(randomIndicesFollowStats());
    }

}

@@ -19,99 +19,111 @@

package org.elasticsearch.client.ccr;

import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD;
import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD;
import static org.elasticsearch.client.ccr.PutFollowRequest.REMOTE_CLUSTER_FIELD;
import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

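// Migrated from a plain XContent round-trip test to the AbstractResponseTestCase pattern:
// a random server-side response is rendered to XContent, parsed back as the client-side
// class, and the two instances are compared field by field in assertInstances below.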
public class GetAutoFollowPatternResponseTests extends ESTestCase {
public class GetAutoFollowPatternResponseTests extends AbstractResponseTestCase<
        GetAutoFollowPatternAction.Response,
        GetAutoFollowPatternResponse> {

    public void testFromXContent() throws IOException {
        xContentTester(this::createParser,
            this::createTestInstance,
            GetAutoFollowPatternResponseTests::toXContent,
            GetAutoFollowPatternResponse::fromXContent)
            .supportsUnknownFields(true)
            .test();
    }

    private GetAutoFollowPatternResponse createTestInstance() {
    @Override
    protected GetAutoFollowPatternAction.Response createServerTestInstance() {
        int numPatterns = randomIntBetween(0, 16);
        NavigableMap<String, GetAutoFollowPatternResponse.Pattern> patterns = new TreeMap<>();
        NavigableMap<String, AutoFollowMetadata.AutoFollowPattern> patterns = new TreeMap<>();
        for (int i = 0; i < numPatterns; i++) {
            GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern(
                randomAlphaOfLength(4), Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4));
            String remoteCluster = randomAlphaOfLength(4);
            List<String> leaderIndexPatterns = Collections.singletonList(randomAlphaOfLength(4));
            String followIndexNamePattern = randomAlphaOfLength(4);

            Integer maxOutstandingReadRequests = null;
            if (randomBoolean()) {
                pattern.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));
                maxOutstandingReadRequests = randomIntBetween(0, Integer.MAX_VALUE);
            }
            Integer maxOutstandingWriteRequests = null;
            if (randomBoolean()) {
                pattern.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE));
                maxOutstandingWriteRequests = randomIntBetween(0, Integer.MAX_VALUE);
            }
            Integer maxReadRequestOperationCount = null;
            if (randomBoolean()) {
                pattern.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
                maxReadRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE);
            }
            ByteSizeValue maxReadRequestSize = null;
            if (randomBoolean()) {
                pattern.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong()));
                maxReadRequestSize = new ByteSizeValue(randomNonNegativeLong());
            }
            Integer maxWriteBufferCount = null;
            if (randomBoolean()) {
                pattern.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE));
                maxWriteBufferCount = randomIntBetween(0, Integer.MAX_VALUE);
            }
            ByteSizeValue maxWriteBufferSize = null;
            if (randomBoolean()) {
                pattern.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong()));
                maxWriteBufferSize = new ByteSizeValue(randomNonNegativeLong());
            }
            Integer maxWriteRequestOperationCount = null;
            if (randomBoolean()) {
                pattern.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
                maxWriteRequestOperationCount = randomIntBetween(0, Integer.MAX_VALUE);
            }
            ByteSizeValue maxWriteRequestSize = null;
            if (randomBoolean()) {
                pattern.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong()));
                maxWriteRequestSize = new ByteSizeValue(randomNonNegativeLong());
            }
            TimeValue maxRetryDelay = null;
            if (randomBoolean()) {
                pattern.setMaxRetryDelay(new TimeValue(randomNonNegativeLong()));
                maxRetryDelay = new TimeValue(randomNonNegativeLong());
            }
            TimeValue readPollTimeout = null;
            if (randomBoolean()) {
                pattern.setReadPollTimeout(new TimeValue(randomNonNegativeLong()));
                readPollTimeout = new TimeValue(randomNonNegativeLong());
            }
            patterns.put(randomAlphaOfLength(4), pattern);
            patterns.put(randomAlphaOfLength(4), new AutoFollowMetadata.AutoFollowPattern(remoteCluster, leaderIndexPatterns,
                followIndexNamePattern, maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests,
                maxOutstandingWriteRequests, maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize,
                maxRetryDelay, readPollTimeout));
        }
        return new GetAutoFollowPatternResponse(patterns);
        return new GetAutoFollowPatternAction.Response(patterns);
    }

    public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        {
            builder.startArray(GetAutoFollowPatternResponse.PATTERNS_FIELD.getPreferredName());
            for (Map.Entry<String, GetAutoFollowPatternResponse.Pattern> entry : response.getPatterns().entrySet()) {
                builder.startObject();
                {
                    builder.field(GetAutoFollowPatternResponse.NAME_FIELD.getPreferredName(), entry.getKey());
                    builder.startObject(GetAutoFollowPatternResponse.PATTERN_FIELD.getPreferredName());
                    {
                        GetAutoFollowPatternResponse.Pattern pattern = entry.getValue();
                        builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster());
                        builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns());
                        if (pattern.getFollowIndexNamePattern() != null) {
                            builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern());
                        }
                        entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS);
                    }
                    builder.endObject();
                }
                builder.endObject();
            }
            builder.endArray();
        }
        builder.endObject();
    @Override
    protected GetAutoFollowPatternResponse doParseToClientInstance(XContentParser parser) throws IOException {
        return GetAutoFollowPatternResponse.fromXContent(parser);
    }

    @Override
    protected void assertInstances(GetAutoFollowPatternAction.Response serverTestInstance, GetAutoFollowPatternResponse clientInstance) {
        assertThat(serverTestInstance.getAutoFollowPatterns().size(), equalTo(clientInstance.getPatterns().size()));
        for (Map.Entry<String, AutoFollowMetadata.AutoFollowPattern> entry : serverTestInstance.getAutoFollowPatterns().entrySet()) {
            AutoFollowMetadata.AutoFollowPattern serverPattern = entry.getValue();
            GetAutoFollowPatternResponse.Pattern clientPattern = clientInstance.getPatterns().get(entry.getKey());
            assertThat(clientPattern, notNullValue());

            assertThat(serverPattern.getRemoteCluster(), equalTo(clientPattern.getRemoteCluster()));
            assertThat(serverPattern.getLeaderIndexPatterns(), equalTo(clientPattern.getLeaderIndexPatterns()));
            assertThat(serverPattern.getFollowIndexPattern(), equalTo(clientPattern.getFollowIndexNamePattern()));
            assertThat(serverPattern.getMaxOutstandingReadRequests(), equalTo(clientPattern.getMaxOutstandingReadRequests()));
            assertThat(serverPattern.getMaxOutstandingWriteRequests(), equalTo(clientPattern.getMaxOutstandingWriteRequests()));
            assertThat(serverPattern.getMaxReadRequestOperationCount(), equalTo(clientPattern.getMaxReadRequestOperationCount()));
            assertThat(serverPattern.getMaxWriteRequestOperationCount(), equalTo(clientPattern.getMaxWriteRequestOperationCount()));
            assertThat(serverPattern.getMaxReadRequestSize(), equalTo(clientPattern.getMaxReadRequestSize()));
            assertThat(serverPattern.getMaxWriteRequestSize(), equalTo(clientPattern.getMaxWriteRequestSize()));
            assertThat(serverPattern.getMaxWriteBufferCount(), equalTo(clientPattern.getMaxWriteBufferCount()));
            assertThat(serverPattern.getMaxWriteBufferSize(), equalTo(clientPattern.getMaxWriteBufferSize()));
            assertThat(serverPattern.getMaxRetryDelay(), equalTo(clientPattern.getMaxRetryDelay()));
            assertThat(serverPattern.getReadPollTimeout(), equalTo(clientPattern.getReadPollTimeout()));
        }
    }

}

@@ -19,71 +19,24 @@

package org.elasticsearch.client.ccr;

import org.elasticsearch.client.AbstractRequestTestCase;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class PutAutoFollowPatternRequestTests extends AbstractXContentTestCase<PutAutoFollowPatternRequest> {
import static org.elasticsearch.client.ccr.PutFollowRequestTests.assertFollowConfig;
import static org.hamcrest.Matchers.equalTo;

    @SuppressWarnings("unchecked")
    private static final ConstructingObjectParser<PutAutoFollowPatternRequest, Void> PARSER = new ConstructingObjectParser<>("test_parser",
        true, (args) -> new PutAutoFollowPatternRequest("name", (String) args[0], (List<String>) args[1]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.REMOTE_CLUSTER_FIELD);
        PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD);
        PARSER.declareString(PutAutoFollowPatternRequest::setFollowIndexNamePattern, PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD);
        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT);
        PARSER.declareField(
            PutAutoFollowPatternRequest::setMaxReadRequestSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()),
            PutFollowRequest.MAX_READ_REQUEST_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS);
        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT);
        PARSER.declareField(
            PutAutoFollowPatternRequest::setMaxWriteRequestSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
            PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS);
        PARSER.declareInt(PutAutoFollowPatternRequest::setMaxWriteBufferCount, FollowConfig.MAX_WRITE_BUFFER_COUNT);
        PARSER.declareField(
            PutAutoFollowPatternRequest::setMaxWriteBufferSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
            PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareField(
            PutAutoFollowPatternRequest::setMaxRetryDelay,
            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()),
            PutFollowRequest.MAX_RETRY_DELAY_FIELD,
            ObjectParser.ValueType.STRING);
        PARSER.declareField(
            PutAutoFollowPatternRequest::setReadPollTimeout,
            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()),
            PutFollowRequest.READ_POLL_TIMEOUT,
            ObjectParser.ValueType.STRING);
    }

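// Migrated to the AbstractRequestTestCase pattern: the client request is rendered to
// XContent, parsed back as the server-side PutAutoFollowPatternAction.Request, and the
// two are compared in assertInstances below.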
public class PutAutoFollowPatternRequestTests extends AbstractRequestTestCase<
        PutAutoFollowPatternRequest,
        PutAutoFollowPatternAction.Request> {

    @Override
    protected PutAutoFollowPatternRequest doParseInstance(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    protected PutAutoFollowPatternRequest createTestInstance() {
    protected PutAutoFollowPatternRequest createClientTestInstance() {
        // The name isn't serialized, because it is specified in the URL path, so there is no need to randomly generate it here.
        PutAutoFollowPatternRequest putAutoFollowPatternRequest = new PutAutoFollowPatternRequest("name",
            randomAlphaOfLength(4), Arrays.asList(generateRandomStringArray(4, 4, false)));

@@ -123,4 +76,18 @@ public class PutAutoFollowPatternRequestTests extends AbstractXContentTestCase<P

        return putAutoFollowPatternRequest;
    }

    @Override
    protected PutAutoFollowPatternAction.Request doParseToServerInstance(XContentParser parser) throws IOException {
        return PutAutoFollowPatternAction.Request.fromXContent(parser, "name");
    }

    @Override
    protected void assertInstances(PutAutoFollowPatternAction.Request serverInstance, PutAutoFollowPatternRequest clientTestInstance) {
        assertThat(serverInstance.getName(), equalTo(clientTestInstance.getName()));
        assertThat(serverInstance.getRemoteCluster(), equalTo(clientTestInstance.getRemoteCluster()));
        assertThat(serverInstance.getLeaderIndexPatterns(), equalTo(clientTestInstance.getLeaderIndexPatterns()));
        assertThat(serverInstance.getFollowIndexNamePattern(), equalTo(clientTestInstance.getFollowIndexNamePattern()));
        assertFollowConfig(serverInstance.getParameters(), clientTestInstance);
    }

}

@@ -19,67 +19,22 @@

package org.elasticsearch.client.ccr;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.AbstractRequestTestCase;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.elasticsearch.xpack.core.ccr.action.FollowParameters;
import org.elasticsearch.xpack.core.ccr.action.PutFollowAction;

import java.io.IOException;

public class PutFollowRequestTests extends AbstractXContentTestCase<PutFollowRequest> {
import static org.hamcrest.Matchers.equalTo;

    private static final ConstructingObjectParser<PutFollowRequest, Void> PARSER = new ConstructingObjectParser<>("test_parser",
        true, (args) -> new PutFollowRequest((String) args[0], (String) args[1], "followerIndex"));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.REMOTE_CLUSTER_FIELD);
        PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.LEADER_INDEX_FIELD);
        PARSER.declareInt(PutFollowRequest::setMaxReadRequestOperationCount, PutFollowRequest.MAX_READ_REQUEST_OPERATION_COUNT);
        PARSER.declareField(
            PutFollowRequest::setMaxReadRequestSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), PutFollowRequest.MAX_READ_REQUEST_SIZE.getPreferredName()),
            PutFollowRequest.MAX_READ_REQUEST_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareInt(PutFollowRequest::setMaxOutstandingReadRequests, PutFollowRequest.MAX_OUTSTANDING_READ_REQUESTS);
        PARSER.declareInt(PutFollowRequest::setMaxWriteRequestOperationCount, PutFollowRequest.MAX_WRITE_REQUEST_OPERATION_COUNT);
        PARSER.declareField(
            PutFollowRequest::setMaxWriteRequestSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), PutFollowRequest.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
            PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareInt(PutFollowRequest::setMaxOutstandingWriteRequests, PutFollowRequest.MAX_OUTSTANDING_WRITE_REQUESTS);
        PARSER.declareInt(PutFollowRequest::setMaxWriteBufferCount, PutFollowRequest.MAX_WRITE_BUFFER_COUNT);
        PARSER.declareField(
            PutFollowRequest::setMaxWriteBufferSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), PutFollowRequest.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
            PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareField(
            PutFollowRequest::setMaxRetryDelay,
            (p, c) -> TimeValue.parseTimeValue(p.text(), PutFollowRequest.MAX_RETRY_DELAY_FIELD.getPreferredName()),
            PutFollowRequest.MAX_RETRY_DELAY_FIELD,
            ObjectParser.ValueType.STRING);
        PARSER.declareField(
            PutFollowRequest::setReadPollTimeout,
            (p, c) -> TimeValue.parseTimeValue(p.text(), PutFollowRequest.READ_POLL_TIMEOUT.getPreferredName()),
            PutFollowRequest.READ_POLL_TIMEOUT,
            ObjectParser.ValueType.STRING);
    }
public class PutFollowRequestTests extends AbstractRequestTestCase<PutFollowRequest, PutFollowAction.Request> {

    @Override
    protected PutFollowRequest doParseInstance(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }

    @Override
    protected PutFollowRequest createTestInstance() {
    protected PutFollowRequest createClientTestInstance() {
        PutFollowRequest putFollowRequest =
            new PutFollowRequest(randomAlphaOfLength(4), randomAlphaOfLength(4), "followerIndex");
        if (randomBoolean()) {

@@ -115,4 +70,30 @@ public class PutFollowRequestTests extends AbstractXContentTestCase<PutFollowReq

        return putFollowRequest;
    }

    @Override
    protected PutFollowAction.Request doParseToServerInstance(XContentParser parser) throws IOException {
        return PutFollowAction.Request.fromXContent(parser, "followerIndex", ActiveShardCount.DEFAULT);
    }

    @Override
    protected void assertInstances(PutFollowAction.Request serverInstance, PutFollowRequest clientTestInstance) {
        assertThat(serverInstance.getRemoteCluster(), equalTo(clientTestInstance.getRemoteCluster()));
        assertThat(serverInstance.getLeaderIndex(), equalTo(clientTestInstance.getLeaderIndex()));
        assertThat(serverInstance.getFollowerIndex(), equalTo(clientTestInstance.getFollowerIndex()));
        assertFollowConfig(serverInstance.getParameters(), clientTestInstance);
    }

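    // Shared helper, imported statically by PutAutoFollowPatternRequestTests and
    // ResumeFollowRequestTests, that compares the common follow parameters field by field.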
    static void assertFollowConfig(FollowParameters serverParameters, FollowConfig clientConfig) {
        assertThat(serverParameters.getMaxReadRequestOperationCount(), equalTo(clientConfig.getMaxReadRequestOperationCount()));
        assertThat(serverParameters.getMaxWriteRequestOperationCount(), equalTo(clientConfig.getMaxWriteRequestOperationCount()));
        assertThat(serverParameters.getMaxOutstandingReadRequests(), equalTo(clientConfig.getMaxOutstandingReadRequests()));
        assertThat(serverParameters.getMaxOutstandingWriteRequests(), equalTo(clientConfig.getMaxOutstandingWriteRequests()));
        assertThat(serverParameters.getMaxReadRequestSize(), equalTo(clientConfig.getMaxReadRequestSize()));
        assertThat(serverParameters.getMaxWriteRequestSize(), equalTo(clientConfig.getMaxWriteRequestSize()));
        assertThat(serverParameters.getMaxWriteBufferCount(), equalTo(clientConfig.getMaxWriteBufferCount()));
        assertThat(serverParameters.getMaxWriteBufferSize(), equalTo(clientConfig.getMaxWriteBufferSize()));
        assertThat(serverParameters.getMaxRetryDelay(), equalTo(clientConfig.getMaxRetryDelay()));
        assertThat(serverParameters.getReadPollTimeout(), equalTo(clientConfig.getReadPollTimeout()));
    }

}

@@ -19,35 +19,30 @@

package org.elasticsearch.client.ccr;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ccr.action.PutFollowAction;

import java.io.IOException;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
import static org.hamcrest.Matchers.is;

public class PutFollowResponseTests extends ESTestCase {
public class PutFollowResponseTests extends AbstractResponseTestCase<PutFollowAction.Response, PutFollowResponse> {

    public void testFromXContent() throws IOException {
        xContentTester(this::createParser,
            this::createTestInstance,
            PutFollowResponseTests::toXContent,
            PutFollowResponse::fromXContent)
            .supportsUnknownFields(true)
            .test();
    @Override
    protected PutFollowAction.Response createServerTestInstance() {
        return new PutFollowAction.Response(randomBoolean(), randomBoolean(), randomBoolean());
    }

    private PutFollowResponse createTestInstance() {
        return new PutFollowResponse(randomBoolean(), randomBoolean(), randomBoolean());
    @Override
    protected PutFollowResponse doParseToClientInstance(XContentParser parser) throws IOException {
        return PutFollowResponse.fromXContent(parser);
    }

    public static void toXContent(PutFollowResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        {
            builder.field(PutFollowResponse.FOLLOW_INDEX_CREATED.getPreferredName(), response.isFollowIndexCreated());
            builder.field(PutFollowResponse.FOLLOW_INDEX_SHARDS_ACKED.getPreferredName(), response.isFollowIndexShardsAcked());
            builder.field(PutFollowResponse.INDEX_FOLLOWING_STARTED.getPreferredName(), response.isIndexFollowingStarted());
        }
        builder.endObject();
    @Override
    protected void assertInstances(PutFollowAction.Response serverTestInstance, PutFollowResponse clientInstance) {
        assertThat(serverTestInstance.isFollowIndexCreated(), is(clientInstance.isFollowIndexCreated()));
        assertThat(serverTestInstance.isFollowIndexShardsAcked(), is(clientInstance.isFollowIndexShardsAcked()));
        assertThat(serverTestInstance.isIndexFollowingStarted(), is(clientInstance.isIndexFollowingStarted()));
    }
}

@@ -19,64 +19,22 @@

package org.elasticsearch.client.ccr;

import org.elasticsearch.client.AbstractRequestTestCase;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction;

import java.io.IOException;

public class ResumeFollowRequestTests extends AbstractXContentTestCase<ResumeFollowRequest> {
import static org.elasticsearch.client.ccr.PutFollowRequestTests.assertFollowConfig;
import static org.hamcrest.Matchers.equalTo;

    private static final ObjectParser<ResumeFollowRequest, Void> PARSER = new ObjectParser<>("test_parser",
        true, () -> new ResumeFollowRequest("followerIndex"));

    static {
        PARSER.declareInt(ResumeFollowRequest::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT);
        PARSER.declareField(
            ResumeFollowRequest::setMaxReadRequestSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()),
            PutFollowRequest.MAX_READ_REQUEST_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareInt(ResumeFollowRequest::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS);
        PARSER.declareInt(ResumeFollowRequest::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT);
        PARSER.declareField(
            ResumeFollowRequest::setMaxWriteRequestSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
            PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareInt(ResumeFollowRequest::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS);
        PARSER.declareInt(ResumeFollowRequest::setMaxWriteBufferCount, FollowConfig.MAX_WRITE_BUFFER_COUNT);
        PARSER.declareField(
            ResumeFollowRequest::setMaxWriteBufferSize,
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
            PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
            ObjectParser.ValueType.STRING);
        PARSER.declareField(
            ResumeFollowRequest::setMaxRetryDelay,
            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()),
            PutFollowRequest.MAX_RETRY_DELAY_FIELD,
            ObjectParser.ValueType.STRING);
        PARSER.declareField(
            ResumeFollowRequest::setReadPollTimeout,
            (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()),
            PutFollowRequest.READ_POLL_TIMEOUT,
            ObjectParser.ValueType.STRING);
    }
public class ResumeFollowRequestTests extends AbstractRequestTestCase<ResumeFollowRequest, ResumeFollowAction.Request> {

    @Override
    protected ResumeFollowRequest doParseInstance(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    protected ResumeFollowRequest createTestInstance() {
    protected ResumeFollowRequest createClientTestInstance() {
        ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest("followerIndex");
        if (randomBoolean()) {
            resumeFollowRequest.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));

@@ -111,4 +68,15 @@ public class ResumeFollowRequestTests extends AbstractXContentTestCase<ResumeFol

        return resumeFollowRequest;
    }

    @Override
    protected ResumeFollowAction.Request doParseToServerInstance(XContentParser parser) throws IOException {
        return ResumeFollowAction.Request.fromXContent(parser, "followerIndex");
    }

    @Override
    protected void assertInstances(ResumeFollowAction.Request serverInstance, ResumeFollowRequest clientTestInstance) {
        assertThat(serverInstance.getFollowerIndex(), equalTo(clientTestInstance.getFollowerIndex()));
        assertFollowConfig(serverInstance.getParameters(), clientTestInstance);
    }

}

@@ -0,0 +1,51 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;

public class DataFrameTransformCheckpointStatsTests extends ESTestCase {

    public void testFromXContent() throws IOException {
        xContentTester(this::createParser,
            DataFrameTransformCheckpointStatsTests::randomDataFrameTransformCheckpointStats,
            DataFrameTransformCheckpointStatsTests::toXContent,
            DataFrameTransformCheckpointStats::fromXContent)
            .supportsUnknownFields(true)
            .test();
    }

    public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() {
        return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000));
    }

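    // Hand-built XContent using the field names the client parser expects,
    // "timestamp_millis" and "time_upper_bound_millis".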
    public static void toXContent(DataFrameTransformCheckpointStats stats, XContentBuilder builder) throws IOException {
        builder.startObject();
        builder.field("timestamp_millis", stats.getTimestampMillis());
        builder.field("time_upper_bound_millis", stats.getTimeUpperBoundMillis());
        builder.endObject();
    }

}

@@ -0,0 +1,61 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;

public class DataFrameTransformCheckpointingInfoTests extends ESTestCase {

    public void testFromXContent() throws IOException {
        xContentTester(this::createParser,
            DataFrameTransformCheckpointingInfoTests::randomDataFrameTransformCheckpointingInfo,
            DataFrameTransformCheckpointingInfoTests::toXContent,
            DataFrameTransformCheckpointingInfo::fromXContent)
            .supportsUnknownFields(false)
            .test();
    }

    public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() {
        return new DataFrameTransformCheckpointingInfo(
            DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(),
            DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(),
            randomLongBetween(0, 10000));
    }

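    // A checkpoint whose timestamp is zero is rendered as absent here, presumably to
    // mirror the server response, which omits empty checkpoint objects.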
    public static void toXContent(DataFrameTransformCheckpointingInfo info, XContentBuilder builder) throws IOException {
        builder.startObject();
        if (info.getCurrent().getTimestampMillis() > 0) {
            builder.field("current");
            DataFrameTransformCheckpointStatsTests.toXContent(info.getCurrent(), builder);
        }
        if (info.getInProgress().getTimestampMillis() > 0) {
            builder.field("in_progress");
            DataFrameTransformCheckpointStatsTests.toXContent(info.getInProgress(), builder);
        }
        builder.field("operations_behind", info.getOperationsBehind());
        builder.endObject();
    }

}

@@ -41,7 +41,8 @@ public class DataFrameTransformStateAndStatsTests extends ESTestCase {

    public static DataFrameTransformStateAndStats randomInstance() {
        return new DataFrameTransformStateAndStats(randomAlphaOfLength(10),
            DataFrameTransformStateTests.randomDataFrameTransformState(),
            DataFrameIndexerTransformStatsTests.randomStats());
            DataFrameIndexerTransformStatsTests.randomStats(),
            DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo());
    }

    public static void toXContent(DataFrameTransformStateAndStats stateAndStats, XContentBuilder builder) throws IOException {

@@ -51,6 +52,8 @@ public class DataFrameTransformStateAndStatsTests extends ESTestCase {

        DataFrameTransformStateTests.toXContent(stateAndStats.getTransformState(), builder);
        builder.field(DataFrameTransformStateAndStats.STATS_FIELD.getPreferredName());
        DataFrameIndexerTransformStatsTests.toXContent(stateAndStats.getTransformStats(), builder);
        builder.field(DataFrameTransformStateAndStats.CHECKPOINTING_INFO_FIELD.getPreferredName());
        DataFrameTransformCheckpointingInfoTests.toXContent(stateAndStats.getCheckpointingInfo(), builder);
        builder.endObject();
    }
}

@@ -56,7 +56,7 @@ public class DataFrameTransformStateTests extends ESTestCase {

        if (state.getPosition() != null) {
            builder.field("current_position", state.getPosition());
        }
        builder.field("generation", state.getGeneration());
        builder.field("checkpoint", state.getCheckpoint());
        if (state.getReason() != null) {
            builder.field("reason", state.getReason());
        }

@@ -1,19 +1,31 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.xpack.core.dataframe.transforms.hlrc;
package org.elasticsearch.client.dataframe.transforms.hlrc;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.protocol.AbstractHlrcXContentTestCase;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStatsTests;

import java.io.IOException;

public class DataFrameIndexerTransformStatsHlrcTests extends AbstractHlrcXContentTestCase<
public class DataFrameIndexerTransformStatsTests extends AbstractHlrcXContentTestCase<
        DataFrameIndexerTransformStats,
        org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats> {

@@ -38,7 +50,7 @@ public class DataFrameIndexerTransformStatsHlrcTests extends AbstractHlrcXConten

    @Override
    protected DataFrameIndexerTransformStats createTestInstance() {
        return DataFrameIndexerTransformStatsTests.randomStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID);
        return DataFrameTransformStateTests.randomStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID);
    }

    @Override
@@ -0,0 +1,64 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms.hlrc;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats;

import java.io.IOException;

public class DataFrameTransformCheckpointStatsTests extends AbstractHlrcXContentTestCase<
        DataFrameTransformCheckpointStats,
        org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats> {

    public static DataFrameTransformCheckpointStats fromHlrc(
            org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats instance) {
        return new DataFrameTransformCheckpointStats(instance.getTimestampMillis(), instance.getTimeUpperBoundMillis());
    }

    @Override
    public org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats doHlrcParseInstance(XContentParser parser)
            throws IOException {
        return org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats.fromXContent(parser);
    }

    @Override
    public DataFrameTransformCheckpointStats convertHlrcToInternal(
            org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats instance) {
        return fromHlrc(instance);
    }

    @Override
    protected DataFrameTransformCheckpointStats createTestInstance() {
        return DataFrameTransformStateTests.randomDataFrameTransformCheckpointStats();
    }

    @Override
    protected DataFrameTransformCheckpointStats doParseInstance(XContentParser parser) throws IOException {
        return DataFrameTransformCheckpointStats.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

}

@@ -0,0 +1,67 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms.hlrc;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo;

import java.io.IOException;

public class DataFrameTransformCheckpointingInfoTests extends AbstractHlrcXContentTestCase<
        DataFrameTransformCheckpointingInfo,
        org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo> {

    public static DataFrameTransformCheckpointingInfo fromHlrc(
            org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) {
        return new DataFrameTransformCheckpointingInfo(
            DataFrameTransformCheckpointStatsTests.fromHlrc(instance.getCurrent()),
            DataFrameTransformCheckpointStatsTests.fromHlrc(instance.getInProgress()),
            instance.getOperationsBehind());
    }

    @Override
    public org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo doHlrcParseInstance(XContentParser parser)
            throws IOException {
        return org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo.fromXContent(parser);
    }

    @Override
    public DataFrameTransformCheckpointingInfo convertHlrcToInternal(
            org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) {
        return fromHlrc(instance);
    }

    @Override
    protected DataFrameTransformCheckpointingInfo createTestInstance() {
        return DataFrameTransformStateTests.randomDataFrameTransformCheckpointingInfo();
    }

    @Override
    protected DataFrameTransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException {
        return DataFrameTransformCheckpointingInfo.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

}

@@ -1,21 +1,33 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.xpack.core.dataframe.transforms.hlrc;
package org.elasticsearch.client.dataframe.transforms.hlrc;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.protocol.AbstractHlrcXContentTestCase;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStatsTests;

import java.io.IOException;
import java.util.function.Predicate;

public class DataFrameTransformStateAndStatsHlrcTests extends AbstractHlrcXContentTestCase<DataFrameTransformStateAndStats,
public class DataFrameTransformStateAndStatsTests extends AbstractHlrcXContentTestCase<DataFrameTransformStateAndStats,
        org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats> {

    @Override

@@ -28,14 +40,15 @@ public class DataFrameTransformStateAndStatsHlrcTests extends AbstractHlrcXConte

    public DataFrameTransformStateAndStats convertHlrcToInternal(
            org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats instance) {
        return new DataFrameTransformStateAndStats(instance.getId(),
            DataFrameTransformStateHlrcTests.fromHlrc(instance.getTransformState()),
            DataFrameIndexerTransformStatsHlrcTests.fromHlrc(instance.getTransformStats()));
            DataFrameTransformStateTests.fromHlrc(instance.getTransformState()),
            DataFrameIndexerTransformStatsTests.fromHlrc(instance.getTransformStats()),
            DataFrameTransformCheckpointingInfoTests.fromHlrc(instance.getCheckpointingInfo()));
    }

    @Override
    protected DataFrameTransformStateAndStats createTestInstance() {
        // the transform id is not part of the HLRC class as it's only a field for internal storage, therefore use a default id
        return DataFrameTransformStateAndStatsTests
        return DataFrameTransformStateTests
            .randomDataFrameTransformStateAndStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID);
    }

@@ -0,0 +1,125 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.dataframe.transforms.hlrc;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats;
import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState;
import org.elasticsearch.xpack.core.indexing.IndexerState;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase<DataFrameTransformState,
        org.elasticsearch.client.dataframe.transforms.DataFrameTransformState> {

    public static DataFrameTransformState fromHlrc(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) {
        return new DataFrameTransformState(DataFrameTransformTaskState.fromString(instance.getTaskState().value()),
            IndexerState.fromString(instance.getIndexerState().value()), instance.getPosition(), instance.getCheckpoint(),
            instance.getReason());
    }

    @Override
    public org.elasticsearch.client.dataframe.transforms.DataFrameTransformState doHlrcParseInstance(XContentParser parser)
            throws IOException {
        return org.elasticsearch.client.dataframe.transforms.DataFrameTransformState.fromXContent(parser);
    }

    @Override
    public DataFrameTransformState convertHlrcToInternal(org.elasticsearch.client.dataframe.transforms.DataFrameTransformState instance) {
        return fromHlrc(instance);
    }

    @Override
    protected DataFrameTransformState createTestInstance() {
        return randomDataFrameTransformState();
    }

    @Override
    protected DataFrameTransformState doParseInstance(XContentParser parser) throws IOException {
        return DataFrameTransformState.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    protected Predicate<String> getRandomFieldsExcludeFilter() {
        return field -> field.equals("current_position");
    }

    public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) {
        return new DataFrameTransformStateAndStats(id,
            randomDataFrameTransformState(),
            randomStats(id),
            randomDataFrameTransformCheckpointingInfo());
    }

    public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() {
        return new DataFrameTransformCheckpointingInfo(randomDataFrameTransformCheckpointStats(),
            randomDataFrameTransformCheckpointStats(), randomNonNegativeLong());
    }

    public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() {
        return new DataFrameTransformCheckpointStats(randomNonNegativeLong(), randomNonNegativeLong());
    }

    public static DataFrameIndexerTransformStats randomStats(String transformId) {
        return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L),
            randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L),
            randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L),
            randomLongBetween(0L, 10000L));
    }

    public static DataFrameTransformState randomDataFrameTransformState() {
        return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()),
            randomFrom(IndexerState.values()),
            randomPosition(),
            randomLongBetween(0, 10),
            randomBoolean() ? null : randomAlphaOfLength(10));
    }

private static Map<String, Object> randomPosition() {
|
||||
if (randomBoolean()) {
|
||||
return null;
|
||||
}
|
||||
int numFields = randomIntBetween(1, 5);
|
||||
Map<String, Object> position = new HashMap<>();
|
||||
for (int i = 0; i < numFields; i++) {
|
||||
Object value;
|
||||
if (randomBoolean()) {
|
||||
value = randomLong();
|
||||
} else {
|
||||
value = randomAlphaOfLengthBetween(1, 10);
|
||||
}
|
||||
position.put(randomAlphaOfLengthBetween(3, 10), value);
|
||||
}
|
||||
return position;
|
||||
}
|
||||
}
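
// Illustrative sketch (not part of this commit) of the round-trip that
// AbstractHlrcXContentTestCase drives for each of these tests; S stands for the
// server-side class and H for the HLRC client class, both placeholders:
//
//   S original = createTestInstance();              // random server-side instance
//   // original is rendered to XContent, then parsed back with the client parser:
//   H hlrc = doHlrcParseInstance(parser);           // parser over original's XContent
//   S roundTripped = convertHlrcToInternal(hlrc);   // map the client object back
//   assertEqualInstances(original, roundTripped);   // must match field for field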

@@ -0,0 +1,222 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.graph.hlrc;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.graph.Connection;
import org.elasticsearch.client.graph.GraphExploreResponse;
import org.elasticsearch.client.graph.Vertex;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.junit.Assert;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;

import static org.hamcrest.Matchers.equalTo;

public class GraphExploreResponseTests extends AbstractHlrcXContentTestCase<
        org.elasticsearch.protocol.xpack.graph.GraphExploreResponse,
        GraphExploreResponse> {

    static final Function<Vertex.VertexId, org.elasticsearch.protocol.xpack.graph.Vertex.VertexId> VERTEX_ID_FUNCTION =
        vId -> new org.elasticsearch.protocol.xpack.graph.Vertex.VertexId(vId.getField(), vId.getTerm());
    static final Function<Vertex, org.elasticsearch.protocol.xpack.graph.Vertex> VERTEX_FUNCTION =
        v -> new org.elasticsearch.protocol.xpack.graph.Vertex(v.getField(), v.getTerm(), v.getWeight(),
            v.getHopDepth(), v.getBg(), v.getFg());

    @Override
    public GraphExploreResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return GraphExploreResponse.fromXContent(parser);
    }

    @Override
    public org.elasticsearch.protocol.xpack.graph.GraphExploreResponse convertHlrcToInternal(GraphExploreResponse instance) {
        return new org.elasticsearch.protocol.xpack.graph.GraphExploreResponse(instance.getTookInMillis(), instance.isTimedOut(),
            instance.getShardFailures(), convertVertices(instance), convertConnections(instance), instance.isReturnDetailedInfo());
    }

    public Map<org.elasticsearch.protocol.xpack.graph.Vertex.VertexId, org.elasticsearch.protocol.xpack.graph.Vertex> convertVertices(
            GraphExploreResponse instance) {
        final Collection<Vertex.VertexId> vertexIds = instance.getVertexIds();
        final Map<org.elasticsearch.protocol.xpack.graph.Vertex.VertexId, org.elasticsearch.protocol.xpack.graph.Vertex> vertexMap =
            new LinkedHashMap<>(vertexIds.size());

        for (Vertex.VertexId vertexId : vertexIds) {
            final Vertex vertex = instance.getVertex(vertexId);

            vertexMap.put(VERTEX_ID_FUNCTION.apply(vertexId), VERTEX_FUNCTION.apply(vertex));
        }
        return vertexMap;
    }

    public Map<ConnectionId, org.elasticsearch.protocol.xpack.graph.Connection> convertConnections(GraphExploreResponse instance) {
        final Collection<Connection.ConnectionId> connectionIds = instance.getConnectionIds();
        final Map<ConnectionId,org.elasticsearch.protocol.xpack.graph.Connection> connectionMap= new LinkedHashMap<>(connectionIds.size());
        for (Connection.ConnectionId connectionId : connectionIds) {
            final Connection connection = instance.getConnection(connectionId);
            final ConnectionId connectionId1 = new ConnectionId(VERTEX_ID_FUNCTION.apply(connectionId.getSource()),
                VERTEX_ID_FUNCTION.apply(connectionId.getTarget()));
            final org.elasticsearch.protocol.xpack.graph.Connection connection1 = new org.elasticsearch.protocol.xpack.graph.Connection(
                VERTEX_FUNCTION.apply(connection.getFrom()), VERTEX_FUNCTION.apply(connection.getTo()), connection.getWeight(),
                connection.getDocCount());
            connectionMap.put(connectionId1, connection1);
        }
        return connectionMap;
    }

    @Override
    protected org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createTestInstance() {
        return createInstance(0);
    }

    private static org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createInstance(int numFailures) {
        int numItems = randomIntBetween(4, 128);
        boolean timedOut = randomBoolean();
        boolean showDetails = randomBoolean();
        long overallTookInMillis = randomNonNegativeLong();
        Map<org.elasticsearch.protocol.xpack.graph.Vertex.VertexId, org.elasticsearch.protocol.xpack.graph.Vertex> vertices =
            new HashMap<>();
        Map<ConnectionId,
            org.elasticsearch.protocol.xpack.graph.Connection> connections = new HashMap<>();
        ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = new ShardSearchFailure(new ElasticsearchException("an error"));
        }

        //Create random set of vertices
        for (int i = 0; i < numItems; i++) {
            org.elasticsearch.protocol.xpack.graph.Vertex v = new org.elasticsearch.protocol.xpack.graph.Vertex("field1",
                randomAlphaOfLength(5), randomDouble(), 0,
                showDetails? randomIntBetween(100, 200):0,
                showDetails? randomIntBetween(1, 100):0);
            vertices.put(v.getId(), v);
        }

        //Wire up half the vertices randomly
        org.elasticsearch.protocol.xpack.graph.Vertex[] vs =
            vertices.values().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[vertices.size()]);
        for (int i = 0; i < numItems/2; i++) {
            org.elasticsearch.protocol.xpack.graph.Vertex v1 = vs[randomIntBetween(0, vs.length-1)];
            org.elasticsearch.protocol.xpack.graph.Vertex v2 = vs[randomIntBetween(0, vs.length-1)];
            if(v1 != v2) {
                org.elasticsearch.protocol.xpack.graph.Connection conn = new org.elasticsearch.protocol.xpack.graph.Connection(v1, v2,
                    randomDouble(), randomLongBetween(1, 10));
                connections.put(conn.getId(), conn);
            }
        }
        return new org.elasticsearch.protocol.xpack.graph.GraphExploreResponse(overallTookInMillis, timedOut, failures,
            vertices, connections, showDetails);
    }


    private static org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createTestInstanceWithFailures() {
        return createInstance(randomIntBetween(1, 128));
    }

    @Override
    protected org.elasticsearch.protocol.xpack.graph.GraphExploreResponse doParseInstance(XContentParser parser) throws IOException {
        return org.elasticsearch.protocol.xpack.graph.GraphExploreResponse.fromXContent(parser);
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    protected boolean assertToXContentEquivalence() {
        return false;
    }

    @Override
    protected String[] getShuffleFieldsExceptions() {
        return new String[]{"vertices", "connections"};
    }

    protected Predicate<String> getRandomFieldsExcludeFilterWhenResultHasErrors() {
        return field -> field.startsWith("responses");
    }

    @Override
    protected void assertEqualInstances(org.elasticsearch.protocol.xpack.graph.GraphExploreResponse expectedInstance,
                                        org.elasticsearch.protocol.xpack.graph.GraphExploreResponse newInstance) {
        Assert.assertThat(newInstance.getTook(), equalTo(expectedInstance.getTook()));
        Assert.assertThat(newInstance.isTimedOut(), equalTo(expectedInstance.isTimedOut()));

        Comparator<org.elasticsearch.protocol.xpack.graph.Connection> connComparator =
            Comparator.comparing(o -> o.getId().toString());
        org.elasticsearch.protocol.xpack.graph.Connection[] newConns =
            newInstance.getConnections().toArray(new org.elasticsearch.protocol.xpack.graph.Connection[0]);
        org.elasticsearch.protocol.xpack.graph.Connection[] expectedConns =
            expectedInstance.getConnections().toArray(new org.elasticsearch.protocol.xpack.graph.Connection[0]);
        Arrays.sort(newConns, connComparator);
        Arrays.sort(expectedConns, connComparator);
        Assert.assertArrayEquals(expectedConns, newConns);

        //Sort the vertices lists before equality test (map insertion sequences can cause order differences)
        Comparator<org.elasticsearch.protocol.xpack.graph.Vertex> comparator = Comparator.comparing(o -> o.getId().toString());
        org.elasticsearch.protocol.xpack.graph.Vertex[] newVertices =
            newInstance.getVertices().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[0]);
        org.elasticsearch.protocol.xpack.graph.Vertex[] expectedVertices =
            expectedInstance.getVertices().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[0]);
        Arrays.sort(newVertices, comparator);
        Arrays.sort(expectedVertices, comparator);
        Assert.assertArrayEquals(expectedVertices, newVertices);

        ShardOperationFailedException[] newFailures = newInstance.getShardFailures();
        ShardOperationFailedException[] expectedFailures = expectedInstance.getShardFailures();
        Assert.assertEquals(expectedFailures.length, newFailures.length);

    }

    /**
     * Test parsing {@link org.elasticsearch.protocol.xpack.graph.GraphExploreResponse} with inner failures as they
     * don't support asserting on xcontent equivalence, given exceptions are not parsed back as the same original class.
     * We run the usual {@link AbstractXContentTestCase#testFromXContent()} without failures, and this other test with
     * failures where we disable asserting on xcontent equivalence at the end.
     */
    public void testFromXContentWithFailures() throws IOException {
        Supplier<org.elasticsearch.protocol.xpack.graph.GraphExploreResponse> instanceSupplier =
            GraphExploreResponseTests::createTestInstanceWithFailures;
        //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata,
        //but that does not bother our assertions, as we only want to test that we don't break.
        boolean supportsUnknownFields = true;
        //exceptions are not of the same type whenever parsed back
        boolean assertToXContentEquivalence = false;
        AbstractXContentTestCase.testFromXContent(
            AbstractXContentTestCase.NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, getShuffleFieldsExceptions(),
            getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance,
            this::assertEqualInstances, assertToXContentEquivalence, ToXContent.EMPTY_PARAMS);
    }

}
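
// Illustrative note (not part of this commit): assertEqualInstances above compares the
// vertex and connection collections order-insensitively by sorting both sides on a
// stable key before comparing, since map iteration order can differ after parsing:
//
//   Arrays.sort(expected, Comparator.comparing(c -> c.getId().toString()));
//   Arrays.sort(actual, Comparator.comparing(c -> c.getId().toString()));
//   Assert.assertArrayEquals(expected, actual);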

@@ -1,14 +1,25 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.license;
package org.elasticsearch.client.license;


import org.elasticsearch.client.license.GetBasicStatusResponse;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;

public class GetBasicStatusResponseTests
    extends AbstractHlrcStreamableXContentTestCase<org.elasticsearch.license.GetBasicStatusResponse, GetBasicStatusResponse> {

@@ -1,13 +1,25 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.license;
package org.elasticsearch.client.license;

import org.elasticsearch.client.license.GetTrialStatusResponse;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;

public class GetTrialStatusResponseTests extends
    AbstractHlrcStreamableXContentTestCase<org.elasticsearch.license.GetTrialStatusResponse, GetTrialStatusResponse> {

@@ -0,0 +1,38 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.license;

import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.CoreMatchers.equalTo;

public class LicenseStatusTests extends ESTestCase {

    public void testCompatibility() {
        final LicenseStatus[] values = LicenseStatus.values();
        final LicenseStatus[] hlrcValues = LicenseStatus.values();

        assertThat(values.length, equalTo(hlrcValues.length));

        for (LicenseStatus value : values) {
            final LicenseStatus licenseStatus = LicenseStatus.fromString(value.label());
            assertThat(licenseStatus.label(), equalTo(value.label()));
        }
    }
}
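
// Illustrative example (not part of this commit), assuming a label such as "active"
// exists — the loop above verifies this round trip for every enum constant:
//
//   LicenseStatus.fromString("active").label()   // -> "active"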

@@ -0,0 +1,140 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.license;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.protocol.xpack.license.LicensesStatus;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;

public class PutLicenseResponseTests extends AbstractHlrcStreamableXContentTestCase<
    org.elasticsearch.protocol.xpack.license.PutLicenseResponse, org.elasticsearch.client.license.PutLicenseResponse> {

    @Override
    public org.elasticsearch.client.license.PutLicenseResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return org.elasticsearch.client.license.PutLicenseResponse.fromXContent(parser);
    }

    @Override
    public org.elasticsearch.protocol.xpack.license.PutLicenseResponse convertHlrcToInternal(
        org.elasticsearch.client.license.PutLicenseResponse instance) {
        return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(instance.isAcknowledged(),
            org.elasticsearch.protocol.xpack.license.LicensesStatus.valueOf(instance.status().name()),
            instance.acknowledgeHeader(), instance.acknowledgeMessages());
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    protected Predicate<String> getRandomFieldsExcludeFilter() {
        // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they
        // are treated as messages from new services
        return p -> p.startsWith("acknowledge");
    }

    @Override
    protected org.elasticsearch.protocol.xpack.license.PutLicenseResponse createTestInstance() {
        boolean acknowledged = randomBoolean();
        org.elasticsearch.protocol.xpack.license.LicensesStatus status =
            randomFrom(org.elasticsearch.protocol.xpack.license.LicensesStatus.VALID,
                org.elasticsearch.protocol.xpack.license.LicensesStatus.INVALID,
                org.elasticsearch.protocol.xpack.license.LicensesStatus.EXPIRED);
        String messageHeader;
        Map<String, String[]> ackMessages;
        if (randomBoolean()) {
            messageHeader = randomAlphaOfLength(10);
            ackMessages = randomAckMessages();
        } else {
            messageHeader = null;
            ackMessages = Collections.emptyMap();
        }

        return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(acknowledged, status, messageHeader, ackMessages);
    }

    private static Map<String, String[]> randomAckMessages() {
        int nFeatures = randomIntBetween(1, 5);

        Map<String, String[]> ackMessages = new HashMap<>();

        for (int i = 0; i < nFeatures; i++) {
            String feature = randomAlphaOfLengthBetween(9, 15);
            int nMessages = randomIntBetween(1, 5);
            String[] messages = new String[nMessages];
            for (int j = 0; j < nMessages; j++) {
                messages[j] = randomAlphaOfLengthBetween(10, 30);
            }
            ackMessages.put(feature, messages);
        }

        return ackMessages;
    }

    @Override
    protected org.elasticsearch.protocol.xpack.license.PutLicenseResponse createBlankInstance() {
        return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse();
    }

    @Override
    protected org.elasticsearch.protocol.xpack.license.PutLicenseResponse mutateInstance(
        org.elasticsearch.protocol.xpack.license.PutLicenseResponse response) {
        @SuppressWarnings("unchecked")
        Function<org.elasticsearch.protocol.xpack.license.PutLicenseResponse,
            org.elasticsearch.protocol.xpack.license.PutLicenseResponse> mutator = randomFrom(
            r -> new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(
                r.isAcknowledged() == false,
                r.status(),
                r.acknowledgeHeader(),
                r.acknowledgeMessages()),
            r -> new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(
                r.isAcknowledged(),
                mutateStatus(r.status()),
                r.acknowledgeHeader(),
                r.acknowledgeMessages()),
            r -> {
                if (r.acknowledgeMessages().isEmpty()) {
                    return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(
                        r.isAcknowledged(),
                        r.status(),
                        randomAlphaOfLength(10),
                        randomAckMessages()
                    );
                } else {
                    return new org.elasticsearch.protocol.xpack.license.PutLicenseResponse(r.isAcknowledged(), r.status());
                }
            }

        );
        return mutator.apply(response);
    }

    private org.elasticsearch.protocol.xpack.license.LicensesStatus mutateStatus(
        org.elasticsearch.protocol.xpack.license.LicensesStatus status) {
        return randomValueOtherThan(status, () -> randomFrom(LicensesStatus.values()));
    }
}
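
// Illustrative note (not part of this commit): mutateInstance is the standard
// equals/hashCode-sensitivity hook — it changes exactly one randomly chosen aspect of
// the response, and the base test asserts the mutated copy no longer equals the
// original. A sketch of the shape, with R as a placeholder response type:
//
//   Function<R, R> mutator = randomFrom(flipAcknowledged, changeStatus, changeAckMessages);
//   R mutated = mutator.apply(original);   // differs from original in one field only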

@@ -1,14 +1,27 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.license;
package org.elasticsearch.client.license;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.license.PostStartBasicResponse;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;

import java.io.IOException;
import java.util.Collections;

@@ -1,13 +1,25 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.xpack.core.ml.action;
package org.elasticsearch.client.ml;

import org.elasticsearch.client.ml.MlInfoResponse;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.xpack.core.ml.action.MlInfoAction.Response;

import java.io.IOException;

@@ -0,0 +1,82 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.xpack.core.ml.action.PutCalendarAction;
import org.elasticsearch.xpack.core.ml.calendars.Calendar;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class PutCalendarActionResponseTests
    extends AbstractHlrcStreamableXContentTestCase<PutCalendarAction.Response, PutCalendarResponse> {

    @Override
    protected PutCalendarAction.Response createTestInstance() {
        return new PutCalendarAction.Response(testInstance());
    }

    @Override
    protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException {
        return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build());
    }

    @Override
    protected boolean supportsUnknownFields() {
        return true;
    }

    @Override
    public PutCalendarResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return PutCalendarResponse.fromXContent(parser);
    }

    @Override
    public PutCalendarAction.Response convertHlrcToInternal(PutCalendarResponse instance) {
        org.elasticsearch.client.ml.calendars.Calendar hlrcCalendar = instance.getCalendar();
        Calendar internalCalendar = new Calendar(hlrcCalendar.getId(), hlrcCalendar.getJobIds(), hlrcCalendar.getDescription());
        return new PutCalendarAction.Response(internalCalendar);
    }

    @Override
    protected PutCalendarAction.Response createBlankInstance() {
        return new PutCalendarAction.Response();
    }

    public static Calendar testInstance() {
        return testInstance(new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()).ofCodePointsLength(random(), 10, 10));
    }

    public static Calendar testInstance(String calendarId) {
        int size = randomInt(10);
        List<String> items = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            items.add(randomAlphaOfLengthBetween(1, 20));
        }
        String description = null;
        if (randomBoolean()) {
            description = randomAlphaOfLength(20);
        }
        return new Calendar(calendarId, items, description);
    }
}
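
// Illustrative note (not part of this commit): CodepointSetGenerator draws random code
// points from the supplied alphabet, so testInstance() above produces a random
// 10-character lowercase calendar id (for example, something like "qhzvbmkrtw").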

@@ -1,12 +1,27 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.xpack.core.security.action.user;
package org.elasticsearch.client.security.hlrc;

import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.client.security.HasPrivilegesResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

@@ -16,10 +31,11 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges;
import org.hamcrest.Matchers;
import org.junit.Assert;

import java.io.IOException;
import java.util.ArrayList;

@@ -35,32 +51,36 @@ import java.util.stream.Collectors;

import static org.hamcrest.Matchers.equalTo;

public class HasPrivilegesResponseTests
    extends AbstractHlrcStreamableXContentTestCase<HasPrivilegesResponse, org.elasticsearch.client.security.HasPrivilegesResponse> {
public class HasPrivilegesResponseTests extends AbstractHlrcStreamableXContentTestCase<
    org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse,
    HasPrivilegesResponse> {

    public void testSerializationV64OrV65() throws IOException {
        final HasPrivilegesResponse original = randomResponse();
        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.V_6_5_1);
        final HasPrivilegesResponse copy = serializeAndDeserialize(original, version);
        final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original = randomResponse();
        final Version version = VersionUtils.randomVersionBetween(LuceneTestCase.random(), Version.V_6_4_0, Version.V_6_5_1);
        final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy = serializeAndDeserialize(original, version);

        assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
        assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable());
        assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
        assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges()));
        Assert.assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
        Assert.assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable());
        Assert.assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
        Assert.assertThat(copy.getApplicationPrivileges(), equalTo(original.getApplicationPrivileges()));
    }

    public void testSerializationV63() throws IOException {
        final HasPrivilegesResponse original = randomResponse();
        final HasPrivilegesResponse copy = serializeAndDeserialize(original, Version.V_6_3_0);
        final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original = randomResponse();
        final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy =
            serializeAndDeserialize(original, Version.V_6_3_0);

        assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
        assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable());
        assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
        assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap()));
        Assert.assertThat(copy.isCompleteMatch(), equalTo(original.isCompleteMatch()));
        Assert.assertThat(copy.getClusterPrivileges().entrySet(), Matchers.emptyIterable());
        Assert.assertThat(copy.getIndexPrivileges(), equalTo(original.getIndexPrivileges()));
        Assert.assertThat(copy.getApplicationPrivileges(), equalTo(Collections.emptyMap()));
    }

    public void testToXContent() throws Exception {
        final HasPrivilegesResponse response = new HasPrivilegesResponse("daredevil", false, Collections.singletonMap("manage", true),
        final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse response =
            new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse("daredevil",
                false, Collections.singletonMap("manage", true),
            Arrays.asList(
                ResourcePrivileges.builder("staff")
                    .addPrivileges(MapBuilder.<String, Boolean>newMapBuilder(new LinkedHashMap<>()).put("read", true)

@@ -77,7 +97,7 @@ public class HasPrivilegesResponseTests
        BytesReference bytes = BytesReference.bytes(builder);

        final String json = bytes.utf8ToString();
        assertThat(json, equalTo("{" +
        Assert.assertThat(json, equalTo("{" +
            "\"username\":\"daredevil\"," +
            "\"has_all_requested\":false," +
            "\"cluster\":{\"manage\":true}," +

@@ -96,23 +116,23 @@ public class HasPrivilegesResponseTests
    }

    @Override
    protected HasPrivilegesResponse createBlankInstance() {
        return new HasPrivilegesResponse();
    protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createBlankInstance() {
        return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse();
    }

    @Override
    protected HasPrivilegesResponse createTestInstance() {
    protected org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse createTestInstance() {
        return randomResponse();
    }

    @Override
    public org.elasticsearch.client.security.HasPrivilegesResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return org.elasticsearch.client.security.HasPrivilegesResponse.fromXContent(parser);
    public HasPrivilegesResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return HasPrivilegesResponse.fromXContent(parser);
    }

    @Override
    public HasPrivilegesResponse convertHlrcToInternal(org.elasticsearch.client.security.HasPrivilegesResponse hlrc) {
        return new HasPrivilegesResponse(
    public org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse convertHlrcToInternal(HasPrivilegesResponse hlrc) {
        return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(
            hlrc.getUsername(),
            hlrc.hasAllRequested(),
            hlrc.getClusterPrivileges(),

@@ -128,21 +148,23 @@ public class HasPrivilegesResponseTests
            .collect(Collectors.toList());
    }

    private HasPrivilegesResponse serializeAndDeserialize(HasPrivilegesResponse original, Version version) throws IOException {
    private org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse serializeAndDeserialize(
        org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse original, Version version) throws IOException {
        logger.info("Test serialize/deserialize with version {}", version);
        final BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(version);
        original.writeTo(out);

        final HasPrivilegesResponse copy = new HasPrivilegesResponse();
        final org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse copy =
            new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse();
        final StreamInput in = out.bytes().streamInput();
        in.setVersion(version);
        copy.readFrom(in);
        assertThat(in.read(), equalTo(-1));
        Assert.assertThat(in.read(), equalTo(-1));
        return copy;
    }

    private HasPrivilegesResponse randomResponse() {
    private org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse randomResponse() {
        final String username = randomAlphaOfLengthBetween(4, 12);
        final Map<String, Boolean> cluster = new HashMap<>();
        for (String priv : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) {

@@ -150,16 +172,19 @@ public class HasPrivilegesResponseTests
        }
        final Collection<ResourcePrivileges> index = randomResourcePrivileges();
        final Map<String, Collection<ResourcePrivileges>> application = new HashMap<>();
        for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) {
        for (String app : randomArray(1, 3, String[]::new,
            () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) {
            application.put(app, randomResourcePrivileges());
        }
        return new HasPrivilegesResponse(username, randomBoolean(), cluster, index, application);
        return new org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse(username, randomBoolean(),
            cluster, index, application);
    }

    private Collection<ResourcePrivileges> randomResourcePrivileges() {
        final Collection<ResourcePrivileges> list = new ArrayList<>();
        // Use hash set to force a unique set of resources
        for (String resource : Sets.newHashSet(randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(2, 6)))) {
        for (String resource : Sets.newHashSet(randomArray(1, 3, String[]::new,
            () -> randomAlphaOfLengthBetween(2, 6)))) {
            final Map<String, Boolean> privileges = new HashMap<>();
            for (String priv : randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))) {
                privileges.put(priv, randomBoolean());

@@ -1,10 +1,24 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.protocol.xpack.watcher;
package org.elasticsearch.client.watcher;

import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

@@ -14,7 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase;
import org.elasticsearch.xpack.core.watcher.actions.ActionStatus;
import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;

@@ -90,7 +104,7 @@ public class GetWatchResponseTests extends
    @Override
    protected GetWatchResponse createTestInstance() {
        String id = randomAlphaOfLength(10);
        if (rarely()) {
        if (LuceneTestCase.rarely()) {
            return new GetWatchResponse(id);
        }
        long version = randomLongBetween(0, 10);

@@ -128,8 +142,8 @@ public class GetWatchResponseTests extends
        long version = randomLongBetween(-1, Long.MAX_VALUE);
        WatchStatus.State state = new WatchStatus.State(randomBoolean(), nowWithMillisResolution());
        ExecutionState executionState = randomFrom(ExecutionState.values());
        ZonedDateTime lastChecked = rarely() ? null : nowWithMillisResolution();
        ZonedDateTime lastMetCondition = rarely() ? null : nowWithMillisResolution();
        ZonedDateTime lastChecked = LuceneTestCase.rarely() ? null : nowWithMillisResolution();
        ZonedDateTime lastMetCondition = LuceneTestCase.rarely() ? null : nowWithMillisResolution();
        int size = randomIntBetween(0, 5);
        Map<String, ActionStatus> actionMap = new HashMap<>();
        for (int i = 0; i < size; i++) {

@@ -0,0 +1,58 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.watcher.hlrc;

import org.elasticsearch.client.watcher.DeleteWatchResponse;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;

import java.io.IOException;

public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase<
    org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse, DeleteWatchResponse> {

    @Override
    protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createTestInstance() {
        String id = randomAlphaOfLength(10);
        long version = randomLongBetween(1, 10);
        boolean found = randomBoolean();
        return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(id, version, found);
    }

    @Override
    protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException {
        return org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse.fromXContent(parser);
    }

    @Override
    public DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return DeleteWatchResponse.fromXContent(parser);
    }

    @Override
    public org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse convertHlrcToInternal(DeleteWatchResponse instance) {
        return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(instance.getId(), instance.getVersion(),
            instance.isFound());
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }
}
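
// Illustrative note (not part of this commit): returning false from
// supportsUnknownFields() keeps the test from injecting random extra fields into the
// XContent before re-parsing, presumably because this response's parser is strict.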

@@ -1,17 +1,30 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.protocol.xpack.watcher;
package org.elasticsearch.client.watcher.hlrc;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.AbstractHlrcXContentTestCase;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;
import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse;

import java.io.IOException;

@@ -0,0 +1,60 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.watcher.hlrc;

import org.elasticsearch.client.watcher.PutWatchResponse;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.AbstractHlrcXContentTestCase;

import java.io.IOException;

public class PutWatchResponseTests extends AbstractHlrcXContentTestCase<
    org.elasticsearch.protocol.xpack.watcher.PutWatchResponse, PutWatchResponse> {

    @Override
    protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createTestInstance() {
        String id = randomAlphaOfLength(10);
        long seqNo = randomNonNegativeLong();
        long primaryTerm = randomLongBetween(1, 20);
        long version = randomLongBetween(1, 10);
        boolean created = randomBoolean();
        return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(id, version, seqNo, primaryTerm, created);
    }

    @Override
    protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse doParseInstance(XContentParser parser) throws IOException {
        return org.elasticsearch.protocol.xpack.watcher.PutWatchResponse.fromXContent(parser);
    }

    @Override
    public PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException {
        return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser);
    }

    @Override
    public org.elasticsearch.protocol.xpack.watcher.PutWatchResponse convertHlrcToInternal(PutWatchResponse instance) {
        return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(instance.getId(), instance.getVersion(),
            instance.getSeqNo(), instance.getPrimaryTerm(), instance.isCreated());
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    }
}

@@ -53,4 +53,4 @@ dependenciesInfo.enabled = false

//we aren't releasing this jar
thirdPartyAudit.enabled = false
unitTest.enabled = false
test.enabled = false

@@ -26,7 +26,7 @@ integTestRunner {
 * when running against an external cluster.
 */
if (System.getProperty("tests.rest.cluster") == null) {
  systemProperty 'tests.logfile',
  nonInputProperties.systemProperty 'tests.logfile',
    "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json"
} else {
  systemProperty 'tests.logfile', '--external--'

@@ -120,46 +120,8 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased
    }
  }

  Map<String, File> artifactFiles = [:]
  List<String> projectDirs = []
  List<String> projects = ['deb', 'rpm']
  if (bwcVersion.onOrAfter('7.0.0')) {
    projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar'])
  } else {
    projects.add('zip')
  }

  for (String projectName : projects) {
    String baseDir = "distribution"
    String classifier = ""
    String extension = projectName
    if (bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) {
      int index = projectName.indexOf('-')
      classifier = "-${projectName.substring(0, index)}-x86_64"
      extension = projectName.substring(index + 1)
      if (extension.equals('tar')) {
        extension += '.gz'
      }
    }
    if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) {
      classifier = "-amd64"
    }
    if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) {
      classifier = "-x86_64"
    }
    if (bwcVersion.onOrAfter('6.3.0')) {
      baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages'
      // add oss variant first
      projectDirs.add("${baseDir}/oss-${projectName}")
      artifactFiles.put("oss-" + projectName, file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}"))
    }
    projectDirs.add("${baseDir}/${projectName}")
    artifactFiles.put(projectName,
      file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}"))
  }

  Closure createRunBwcGradleTask = { name, extraConfig ->
    task "$name"(type: Exec) {
    return tasks.create(name: "$name", type: Exec) {
      dependsOn checkoutBwcBranch, writeBuildMetadata
      workingDir = checkoutDir
      doFirst {

@@ -217,28 +179,75 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased
      }
    }

    createRunBwcGradleTask("buildBwcVersion") {
      for (String dir : projectDirs) {
        args ":${dir.replace('/', ':')}:assemble"
      }
      doLast {
        List missing = artifactFiles.values().grep { file ->
          false == file.exists()
        }
        if (false == missing.empty) {
          throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected files ${missing}")
        }
      }
    Closure buildBwcTaskName = { projectName ->
      return "buildBwc${projectName.replaceAll(/-\w/){ it[1].toUpperCase() }.capitalize()}"
    }
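    // Illustrative example (not part of this commit): the closure camel-cases each
    // '-x' segment, e.g. buildBwcTaskName('oss-darwin-tar') -> "buildBwcOssDarwinTar"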

    task buildBwc {}

    Closure createBuildBwcTask = { projectName, projectDir, projectArtifact ->
      Task bwcTask = createRunBwcGradleTask(buildBwcTaskName(projectName)) {
        args ":${projectDir.replace('/', ':')}:assemble"
        doLast {
          if (projectArtifact.exists() == false) {
            throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected file ${projectArtifact}")
          }
        }
      }
      buildBwc.dependsOn bwcTask
    }

    Map<String, File> artifactFiles = [:]
    List<String> projectDirs = []
    List<String> projects = ['deb', 'rpm']
    if (bwcVersion.onOrAfter('7.0.0')) {
      projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar'])
    } else {
      projects.add('zip')
    }

    for (String projectName : projects) {
      String baseDir = "distribution"
      String classifier = ""
      String extension = projectName
      if (bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) {
        int index = projectName.indexOf('-')
        classifier = "-${projectName.substring(0, index)}-x86_64"
        extension = projectName.substring(index + 1)
        if (extension.equals('tar')) {
          extension += '.gz'
        }
      }
      if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) {
        classifier = "-amd64"
      }
      if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) {
        classifier = "-x86_64"
      }
      if (bwcVersion.onOrAfter('6.3.0')) {
        baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages'
        // add oss variant first
        projectDirs.add("${baseDir}/oss-${projectName}")
        File ossProjectArtifact = file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}")
        artifactFiles.put("oss-" + projectName, ossProjectArtifact)
        createBuildBwcTask("oss-${projectName}", "${baseDir}/oss-${projectName}", ossProjectArtifact)
      }
      projectDirs.add("${baseDir}/${projectName}")
      File projectArtifact = file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}")
      artifactFiles.put(projectName, projectArtifact)

      createBuildBwcTask(projectName, "${baseDir}/${projectName}", projectArtifact)
    }


    createRunBwcGradleTask("resolveAllBwcDependencies") {
      args 'resolveAllDependencies'
    }

    resolveAllDependencies.dependsOn resolveAllBwcDependencies

    for (e in artifactFiles) {
      String projectName = e.key
      String buildBwcTask = buildBwcTaskName(projectName)
      File artifactFile = e.value
      String artifactFileName = artifactFile.name
      String artifactName = artifactFileName.contains('oss') ? 'elasticsearch-oss' : 'elasticsearch'

@@ -251,7 +260,7 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased
    }
    configurations.create(projectName)
    artifacts {
      it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcVersion])
      it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcTask])
    }
  }
  // make sure no dependencies were added to assemble; we want it to be a no-op

@@ -23,7 +23,7 @@ ext.expansions = { oss ->
  return [
    'elasticsearch' : elasticsearch,
    'license' : oss ? 'Apache-2.0' : 'Elastic License',
    'source_elasticsearch': local() ? "COPY $elasticsearch /opt/" : "RUN curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch}",
    'source_elasticsearch': local() ? "COPY $elasticsearch /opt/" : "RUN cd /opt && curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch} && cd -",
    'version' : VersionProperties.elasticsearch
  ]
}

@@ -7,7 +7,7 @@ forbiddenApisMain {
  replaceSignatureFiles 'jdk-signatures'
}

unitTest.enabled = false
test.enabled = false
javadoc.enabled = false
loggerUsageCheck.enabled = false
jarHell.enabled = false

@@ -35,7 +35,7 @@ dependencyLicenses {
  mapping from: /bc.*/, to: 'bouncycastle'
}

unitTest {
test {
  // TODO: find a way to add permissions for the tests in this module
  systemProperty 'tests.security.manager', 'false'
}

@@ -6,7 +6,7 @@ See: https://github.com/elastic/docs
Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN
CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested
by the command `gradle :docs:check`. To test just the docs from a single page,
use e.g. `gradle :docs:check -Dtests.method="\*rollover*"`.
use e.g. `./gradlew :docs:integTestRunner --tests "*rollover*"`.

NOTE: If you have an elasticsearch-extra folder alongside your elasticsearch
folder, you must temporarily rename it when you are testing 6.3 or later branches.

@@ -152,7 +152,7 @@ Some settings are sensitive and must be stored in the

[source,sh]
----
bin/elasticsearch-keystore add-file gcs.client.default.credentials_file
bin/elasticsearch-keystore add-file gcs.client.default.credentials_file /path/service-account.json
----

The following are the available client settings. Those that must be stored in the keystore

@@ -278,7 +278,7 @@ POST /sales/_search?size=0
  "time_zone": "CET",
  "ranges": [
    { "to": "2016/02/01" }, <1>
    { "from": "2016/02/01", "to" : "now/d" <2>},
    { "from": "2016/02/01", "to" : "now/d" }, <2>
    { "from": "now/d" }
  ]
}

@@ -542,6 +542,12 @@ It is possible (although rarely required) to filter the values for which buckets
`exclude` parameters which are based on a regular expression string or arrays of exact terms. This functionality mirrors the features
described in the <<search-aggregations-bucket-terms-aggregation,terms aggregation>> documentation.

==== Collect mode

To avoid memory issues, the `significant_terms` aggregation always computes child aggregations in `breadth_first` mode.
A description of the different collection modes can be found in the
<<search-aggregations-bucket-terms-aggregation-collect, terms aggregation>> documentation.

==== Execution hint

There are different mechanisms by which terms aggregations can be executed:

@@ -775,6 +775,7 @@ fields, then use `copy_to` in your mapping to create a new dedicated field at
index time which contains the values from both fields. You can aggregate on
this single field, which will benefit from the global ordinals optimization.

[[search-aggregations-bucket-terms-aggregation-collect]]
==== Collect mode

Deferring calculation of child aggregations

@@ -195,13 +195,19 @@ Will return, for example:
    },
    "discovery_types": {
      ...
    }
  },
  "packaging_types": [
    {
      ...
    }
  ]
}
}
--------------------------------------------------
// TESTRESPONSE[s/"plugins": \[[^\]]*\]/"plugins": $body.$_path/]
// TESTRESPONSE[s/"network_types": \{[^\}]*\}/"network_types": $body.$_path/]
// TESTRESPONSE[s/"discovery_types": \{[^\}]*\}/"discovery_types": $body.$_path/]
// TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/]
// TESTRESPONSE[s/: true|false/: $body.$_path/]
// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/]
// TESTRESPONSE[s/: "[^"]*"/: $body.$_path/]

@@ -209,7 +215,8 @@ Will return, for example:
// 1. Ignore the contents of the `plugins` object because we don't know all of
// the plugins that will be in it. And because we figure folks don't need to
// see an exhaustive list anyway.
// 2. Similarly, ignore the contents of `network_types` and `discovery_types`.
// 2. Similarly, ignore the contents of `network_types`, `discovery_types`, and
// `packaging_types`.
// 3. All of the numbers and strings on the right hand side of *every* field in
// the response are ignored. So we're really only asserting things about the
// shape of this response, not the values in it.

@@ -1,15 +1,17 @@
[[node-tool]]
== elasticsearch-node

The `elasticsearch-node` command enables you to perform unsafe operations that
risk data loss but which may help to recover some data in a disaster.
The `elasticsearch-node` command enables you to perform certain unsafe
operations on a node that are only possible while it is shut down. This command
allows you to adjust the <<modules-node,role>> of a node and may be able to
recover some data after a disaster.

[float]
=== Synopsis

[source,shell]
--------------------------------------------------
bin/elasticsearch-node unsafe-bootstrap|detach-cluster
bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster
  [--ordinal <Integer>] [-E <KeyValuePair>]
  [-h, --help] ([-s, --silent] | [-v, --verbose])
--------------------------------------------------

@@ -17,6 +19,60 @@ bin/elasticsearch-node unsafe-bootstrap|detach-cluster
[float]
=== Description

This tool has three modes:

* `elasticsearch-node repurpose` can be used to delete unwanted data from a
  node if it used to be a <<data-node,data node>> or a
  <<master-node,master-eligible node>> but has been repurposed not to have one
  or other of these roles.

* `elasticsearch-node unsafe-bootstrap` can be used to perform _unsafe cluster
  bootstrapping_. It forces one of the nodes to form a brand-new cluster on
  its own, using its local copy of the cluster metadata.

* `elasticsearch-node detach-cluster` enables you to move nodes from one
  cluster to another. This can be used to move nodes into a new cluster
  created with the `elasticsearch-node unsafe-bootstrap` command. If unsafe
  cluster bootstrapping was not possible, it also enables you to move nodes
  into a brand-new cluster.

[[node-tool-repurpose]]
[float]
==== Changing the role of a node

There may be situations where you want to repurpose a node without following
the <<change-node-role,proper repurposing processes>>. The `elasticsearch-node
repurpose` tool allows you to delete any excess on-disk data and start a node
after repurposing it.

The intended use is:

* Stop the node
* Update `elasticsearch.yml` by setting `node.master` and `node.data` as
  desired.
* Run `elasticsearch-node repurpose` on the node
* Start the node

If you run `elasticsearch-node repurpose` on a node with `node.data: false` and
`node.master: true` then it will delete any remaining shard data on that node,
but it will leave the index and cluster metadata alone. If you run
`elasticsearch-node repurpose` on a node with `node.data: false` and
`node.master: false` then it will delete any remaining shard data and index
metadata, but it will leave the cluster metadata alone.

[WARNING]
Running this command can lead to data loss for the indices mentioned if the
data contained is not available on other nodes in the cluster. Only run this
tool if you understand and accept the possible consequences, and only after
determining that the node cannot be repurposed cleanly.

The tool provides a summary of the data to be deleted and asks for confirmation
before making any changes. You can get detailed information about the affected
indices and shards by passing the verbose (`-v`) option.

[float]
==== Recovering data after a disaster

Sometimes {es} nodes are temporarily stopped, perhaps because of the need to
perform some maintenance activity or perhaps because of a hardware failure.
After you resolve the temporary condition and restart the node,

@@ -53,22 +109,10 @@ way forward that does not risk data loss, but it may be possible to use the
`elasticsearch-node` tool to construct a new cluster that contains some of the
data from the failed cluster.

This tool has two modes:

* `elastisearch-node unsafe-bootstap` can be used if there is at least one
  remaining master-eligible node. It forces one of the remaining nodes to form
  a brand-new cluster on its own, using its local copy of the cluster metadata.
  This is known as _unsafe cluster bootstrapping_.

* `elastisearch-node detach-cluster` enables you to move nodes from one cluster
  to another. This can be used to move nodes into the new cluster created with
  the `elastisearch-node unsafe-bootstap` command. If unsafe cluster bootstrapping was not
  possible, it also enables you to
  move nodes into a brand-new cluster.

[[node-tool-unsafe-bootstrap]]
[float]
==== Unsafe cluster bootstrapping
===== Unsafe cluster bootstrapping

If there is at least one remaining master-eligible node, but it is not possible
to restart a majority of them, then the `elasticsearch-node unsafe-bootstrap`

@@ -143,7 +187,7 @@ job.

[[node-tool-detach-cluster]]
[float]
==== Detaching nodes from their cluster
===== Detaching nodes from their cluster

It is unsafe for nodes to move between clusters, because different clusters
have completely different cluster metadata. There is no way to safely merge the

@@ -206,9 +250,12 @@ The message `Node was successfully detached from the cluster` does not mean
that there has been no data loss, it just means that the tool was able to complete
its job.

[float]
=== Parameters

`repurpose`:: Delete excess data when a node's roles are changed.

`unsafe-bootstrap`:: Specifies to unsafely bootstrap this node as a new
one-node cluster.

@@ -230,6 +277,51 @@ to `0`, meaning to use the first node in the data path.
[float]
=== Examples

[float]
==== Repurposing a node as a dedicated master node (master: true, data: false)

In this example, a former data node is repurposed as a dedicated master node.
First update the node's settings to `node.master: true` and `node.data: false`
in its `elasticsearch.yml` config file. Then run the `elasticsearch-node
repurpose` command to find and remove excess shard data:

[source,txt]
----
node$ ./bin/elasticsearch-node repurpose

WARNING: Elasticsearch MUST be stopped before running this tool.

Found 2 shards in 2 indices to clean up
Use -v to see list of paths and indices affected
Node is being re-purposed as master and no-data. Clean-up of shard data will be performed.
Do you want to proceed?
Confirm [y/N] y
Node successfully repurposed to master and no-data.
----

[float]
==== Repurposing a node as a coordinating-only node (master: false, data: false)

In this example, a node that previously held data is repurposed as a
coordinating-only node. First update the node's settings to `node.master:
false` and `node.data: false` in its `elasticsearch.yml` config file. Then run
the `elasticsearch-node repurpose` command to find and remove excess shard data
and index metadata:

[source,txt]
----
node$ ./bin/elasticsearch-node repurpose

WARNING: Elasticsearch MUST be stopped before running this tool.

Found 2 indices (2 shards and 2 index meta data) to clean up
Use -v to see list of paths and indices affected
Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed.
Do you want to proceed?
Confirm [y/N] y
Node successfully repurposed to no-master and no-data.
----

[float]
==== Unsafe cluster bootstrapping

@@ -331,4 +423,3 @@ Do you want to proceed?
Confirm [y/N] y
Node was successfully detached from the cluster
----

@@ -91,3 +91,16 @@ become meaningless. Elasticsearch makes it easy to check how many documents
have malformed fields by using `exists` or `term` queries on the special
<<mapping-ignored-field,`_ignored`>> field.
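
A minimal sketch of such a query, assuming an index that already contains
documents with at least one ignored field:

[source,js]
----
GET /_search
{
  "query": {
    "exists": { "field": "_ignored" }
  }
}
----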

==== Limits for JSON Objects
You can't use `ignore_malformed` with the following datatypes:

* <<nested, Nested datatype>>
* <<object, Object datatype>>
* <<range, Range datatypes>>

You also can't use `ignore_malformed` to ignore JSON objects submitted to fields
of the wrong datatype. A JSON object is any data surrounded by curly brackets
`"{}"` and includes data mapped to the nested, object, and range datatypes.

If you submit a JSON object to an unsupported field, {es} will return an error
and reject the entire document regardless of the `ignore_malformed` setting.
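
As a sketch (the index and field names here are hypothetical), the following
second request is rejected even though `ignore_malformed` is enabled, because
an object is supplied to an `integer` field:

[source,js]
----
PUT /my_index
{
  "mappings": {
    "properties": {
      "count": { "type": "integer", "ignore_malformed": true }
    }
  }
}

PUT /my_index/_doc/1
{
  "count": { "values": [1, 2] }
}
----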

@@ -1,11 +1,11 @@
[[removal-of-types]]
== Removal of mapping types

IMPORTANT: Indices created in Elasticsearch 6.0.0 or later may only contain a
single <<mapping-type,mapping type>>. Indices created in 5.x with multiple
mapping types will continue to function as before in Elasticsearch 6.x.
Types will be deprecated in APIs in Elasticsearch 7.0.0, and completely
removed in 8.0.0.
IMPORTANT: Indices created in Elasticsearch 7.0.0 or later no longer accept a
`_default_` mapping. Indices created in 6.x will continue to function as before
in Elasticsearch 6.x. Types are deprecated in APIs in 7.0, with breaking changes
to the index creation, put mapping, get mapping, put template, get template and
get field mappings APIs.

[float]
=== What are mapping types?

@@ -13,7 +13,9 @@ See also <<release-highlights>> and <<es-release-notes>>.
* <<breaking_70_cluster_changes>>
* <<breaking_70_discovery_changes>>
* <<breaking_70_indices_changes>>
* <<breaking_70_ingest_changes>>
* <<breaking_70_mappings_changes>>
* <<breaking_70_ml_changes>>
* <<breaking_70_search_changes>>
* <<breaking_70_suggesters_changes>>
* <<breaking_70_packaging_changes>>

@@ -50,8 +52,10 @@ include::migrate_7_0/aggregations.asciidoc[]
include::migrate_7_0/analysis.asciidoc[]
include::migrate_7_0/cluster.asciidoc[]
include::migrate_7_0/discovery.asciidoc[]
include::migrate_7_0/ingest.asciidoc[]
include::migrate_7_0/indices.asciidoc[]
include::migrate_7_0/mappings.asciidoc[]
include::migrate_7_0/ml.asciidoc[]
include::migrate_7_0/search.asciidoc[]
include::migrate_7_0/suggesters.asciidoc[]
include::migrate_7_0/packaging.asciidoc[]

@@ -47,3 +47,11 @@ explicitly defining how their data is processed.
The `percentiles` and `percentile_ranks` aggregations used to return `NaN` in
the response if they were applied to an empty set of values. Because `NaN` is
not officially supported by JSON, it has been replaced with `null`.

[float]
==== `stats` and `extended_stats` now return 0 instead of `null` for zero docs

When the `stats` and `extended_stats` aggregations collected zero docs (`doc_count: 0`),
their value would be `null`. This was in contrast with the `sum` aggregation which
would return `0`. The `stats` and `extended_stats` aggs are now consistent with
`sum` and also return zero.

@@ -76,10 +76,32 @@ pools. Note that `core` and `max` will be populated for scaling thread pools,
and `size` will be populated for fixed thread pools.

[float]
==== The parameter `fields` deprecated in 6.x has been removed from Bulk request
and Update request. The Update API returns `400 - Bad request` if request contains
unknown parameters (instead of ignored in the previous version).

[float]
==== PUT Document with Version error message changed when document is missing

If you attempt to `PUT` a document with versioning (e.g. `PUT /test/_doc/1?version=4`)
but the document does not exist, a cryptic message is returned:

[source,text]
----------
version conflict, current version [-1] is different than the one provided [4]
----------

Now if the document is missing a more helpful message is returned:

[source,text]
----------
document does not exist (expected version [4])
----------

Although exception messages are liable to change and not generally subject to
backwards compatibility, the nature of this message might mean clients are relying
on parsing the version numbers and so the format change might impact some users.

[float]
[[remove-suggest-metric]]
==== Remove support for `suggest` metric/index metric in indices stats and nodes stats APIs

@@ -168,3 +190,39 @@ privilege).

The `_cache/clear` API no longer supports the `GET` HTTP verb. It must be called
with `POST`.

[float]
==== Cluster state size metrics removed from Cluster State API Response

The `compressed_size` / `compressed_size_in_bytes` fields were removed from
the Cluster State API response. The calculation of the size was expensive and had
dubious value, so the field was removed from the response.

[float]
==== Migration Assistance API has been removed

The Migration Assistance API has been functionally replaced by the
Deprecation Info API, and the Migration Upgrade API is not used for the
transition from ES 6.x to 7.x, and does not need to be kept around to
repair indices that were not properly upgraded before upgrading the
cluster, as was the case in 6.

[float]
==== Changes to thread pool naming in Node and Cat APIs
The `thread_pool` information returned from the Nodes and Cat APIs has been
standardized to use the same terminology as the thread pool configurations.
This means the response will align with the configuration instead of being
the same across all the thread pools, regardless of type.

[float]
==== Return 200 when cluster has valid read-only blocks
If the cluster was configured with `no_master_block: write` and lost its master,
it would return a `503` status code from a main request (`GET /`) even though
there are viable read-only nodes available. The cluster now returns 200 status
in this situation.

[float]
==== Clearing indices cache is now POST-only
Clearing the indices cache could previously be done via GET and POST. As GET should
only support read only non state-changing operations, this is no longer allowed.
Only POST can be used to clear the cache.
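
For example, with a hypothetical index named `my_index`:

[source,js]
----
POST /my_index/_cache/clear
----

A `GET` request to the same endpoint is now rejected.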

@@ -0,0 +1,27 @@
[float]
[[breaking_70_ingest_changes]]
=== API changes

//NOTE: The notable-breaking-changes tagged regions are re-used in the
//Installation and Upgrade Guide

//tag::notable-breaking-changes[]

[float]
==== Ingest configuration exception information is now transmitted in metadata field

Previously, some ingest configuration exception information about ingest processors
was sent to the client in the HTTP headers, which is inconsistent with how
exceptions are conveyed in other parts of Elasticsearch.

Configuration exception information is now conveyed as a field in the response
body.
//end::notable-breaking-changes[]
[float]
==== Ingest plugin special handling has been removed
There was some special handling for installing and removing the `ingest-geoip` and
`ingest-user-agent` plugins after they were converted to modules. This special handling
was done to minimize breaking users in a minor release, and would exit with a status code
zero to avoid breaking automation.

This special handling has now been removed.

@@ -29,3 +29,8 @@ backwards compatibility.

We deprecated `setHosts` in 6.4.0 in favor of `setNodes` because it supports
host metadata used by the `NodeSelector`.

[float]
==== Minimum compiler version change
The minimum compiler version on the low-level REST client has been bumped
to JDK 8.

@@ -21,12 +21,14 @@ This field used to index a composite key formed of the `_type` and the `_id`.
Now that indices cannot have multiple types, this has been removed in favour
of `_id`.

//tag::notable-breaking-changes[]
[float]
==== The `_default_` mapping is no longer allowed

The `_default_` mapping has been deprecated in 6.0 and is now no longer allowed
in 7.0. Trying to configure a `_default_` mapping on 7.x indices will result in
an error.
//end::notable-breaking-changes[]

[float]
==== `index_options` for numeric fields has been removed

@@ -85,4 +87,9 @@ will be removed in a future version.

The maximum allowed number of completion contexts in a mapping will be limited
to 10 in the next major version. Completion fields that define more than 10
contexts in a mapping will log a deprecation warning in this version.

[float]
==== `include_type_name` now defaults to `false`
The default for `include_type_name` is now `false` for all APIs that accept
the parameter.
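
As a sketch of the typeless form this default implies (the index and field
names are hypothetical), mappings are now supplied without a type name:

[source,js]
----
PUT /my_index
{
  "mappings": {
    "properties": {
      "title": { "type": "text" }
    }
  }
}
----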

@@ -0,0 +1,15 @@
[float]
[[breaking_70_ml_changes]]
=== ML changes

//NOTE: The notable-breaking-changes tagged regions are re-used in the
//Installation and Upgrade Guide

//tag::notable-breaking-changes[]

// end::notable-breaking-changes[]

[float]
==== Types in Datafeed config are no longer valid
Types have been removed from the datafeed config and are no longer
valid parameters.

@@ -29,3 +29,9 @@ for windows. These files have been removed. Use the `zip` package instead.

Ubuntu 14.04 will reach end-of-life on April 30, 2019. As such, we are no longer
supporting Ubuntu 14.04.

[float]
==== CLI secret prompting is no longer supported
The ability to use `${prompt.secret}` and `${prompt.text}` to collect secrets
from the CLI at server start is no longer supported. Secure settings have replaced
the need for these prompts.

@@ -212,6 +212,7 @@ on whether queries need to access score or not. As a result `bool` queries with
`minimum_should_match` to 1. This behavior has been deprecated in the previous
major version.

//tag::notable-breaking-changes[]
[float]
==== `hits.total` is now an object in the search response

@@ -241,6 +242,7 @@ You can also retrieve `hits.total` as a number in the rest response by adding
`rest_total_hits_as_int=true` in the request parameter of the search request.
This parameter has been added to ease the transition to the new format and
will be removed in the next major version (8.0).
//end::notable-breaking-changes[]
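
A sketch of the new shape (the values here are illustrative): the total is an
object with a `value` and a `relation` of either `"eq"` or `"gte"`:

[source,js]
----
"hits": {
  "total": {
    "value": 10000,
    "relation": "gte"
  }
}
----

Adding `rest_total_hits_as_int=true` to the request restores the old
`"total": 10000` number form in the response.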

[float]
==== `hits.total` is omitted in the response if `track_total_hits` is disabled (false)

@@ -250,6 +252,7 @@ will set `hits.total` to null and the object will not be displayed in the rest
layer. You can add `rest_total_hits_as_int=true` in the search request parameters
to get the old format back (`"total": -1`).

//tag::notable-breaking-changes[]
[float]
==== `track_total_hits` defaults to 10,000

@@ -280,3 +283,22 @@ documents. If the total number of hits that match the query is greater than this

You can force the count to always be accurate by setting `track_total_hits`
to `true` explicitly in the search request.
//end::notable-breaking-changes[]
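
A minimal sketch of a request that forces an accurate count:

[source,js]
----
GET /_search
{
  "track_total_hits": true,
  "query": { "match_all": {} }
}
----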

[float]
==== Limitations on Similarities
Lucene 8 introduced more constraints on similarities, in particular:

- scores must not be negative,
- scores must not decrease when term freq increases,
- scores must not increase when norm (interpreted as an unsigned long) increases.

[float]
==== Weights in Function Score must be positive
Negative `weight` parameters in the `function_score` are no longer allowed.
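
A sketch of a request that remains valid under the new constraint (the query
itself is illustrative); a negative value in `weight` would now be rejected:

[source,js]
----
GET /_search
{
  "query": {
    "function_score": {
      "query": { "match_all": {} },
      "functions": [
        { "weight": 2 }
      ]
    }
  }
}
----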

[float]
==== Query string and Simple query string limit expansion of fields to 1024
The number of automatically expanded fields for the "all fields"
mode (`"default_field": "*"`) for the query_string and simple_query_string
queries is now 1024 fields.
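
A sketch of a query that uses the "all fields" mode and is therefore subject
to this limit (the query text is illustrative):

[source,js]
----
GET /_search
{
  "query": {
    "query_string": {
      "query": "quick brown fox",
      "default_field": "*"
    }
  }
}
----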

@@ -211,3 +211,22 @@ Elastic Stack to handle the indexing part.
==== Ingest User Agent processor defaults to the `ecs` output format
https://github.com/elastic/ecs[ECS] format is now the default.
The `ecs` setting for the user agent ingest processor now defaults to true.

[float]
[[remove-action-master-force_local]]
==== Remove `action.master.force_local`

The `action.master.force_local` setting was an undocumented setting, used
internally by the tribe node to force reads to local cluster state (instead of
forwarding to a master, which tribe nodes did not have). Since the tribe
node was removed, this setting was removed too.

[float]
==== Enforce cluster-wide shard limit
The cluster-wide shard limit is now enforced and not optional. The limit can
still be adjusted as desired using the cluster settings API.

[float]
==== HTTP Max content length setting is no longer parsed leniently
Previously, `http.max_content_length` would reset to `100mb` if the setting was
`Integer.MAX_VALUE`. This leniency has been removed.

@@ -14,3 +14,8 @@

Plugins must now explicitly indicate the type of suggestion that they produce.

[float]
==== Phrase suggester now multiplies alpha
Previously, the laplace smoothing used by the phrase suggester added `alpha`,
when it should instead multiply. This behavior has been changed and will
affect suggester scores.
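
A sketch of where `alpha` enters a request (the field and suggest text are
hypothetical); scores produced by this smoothing now change accordingly:

[source,js]
----
POST /_search
{
  "suggest": {
    "my-suggestion": {
      "text": "noble prize",
      "phrase": {
        "field": "title.trigram",
        "smoothing": {
          "laplace": { "alpha": 0.7 }
        }
      }
    }
  }
}
----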

@@ -204,6 +204,49 @@ NOTE: These settings apply only when {xpack} is not installed. To create a
dedicated coordinating node when {xpack} is installed, see <<modules-node-xpack,{xpack} node settings>>.
endif::include-xpack[]

[float]
[[change-node-role]]
=== Changing the role of a node

Each data node maintains the following data on disk:

* the shard data for every shard allocated to that node,
* the index metadata corresponding with every shard allocated to that node, and
* the cluster-wide metadata, such as settings and index templates.

Similarly, each master-eligible node maintains the following data on disk:

* the index metadata for every index in the cluster, and
* the cluster-wide metadata, such as settings and index templates.

Each node checks the contents of its data path at startup. If it discovers
unexpected data then it will refuse to start. This is to avoid importing
unwanted <<modules-gateway-dangling-indices,dangling indices>> which can lead
to a red cluster health. To be more precise, nodes with `node.data: false` will
refuse to start if they find any shard data on disk at startup, and nodes with
both `node.master: false` and `node.data: false` will refuse to start if they
have any index metadata on disk at startup.

It is possible to change the roles of a node by adjusting its
`elasticsearch.yml` file and restarting it. This is known as _repurposing_ a
node. In order to satisfy the checks for unexpected data described above, you
must perform some extra steps to prepare a node for repurposing when setting
its `node.data` or `node.master` roles to `false`:

* If you want to repurpose a data node by changing `node.data` to `false` then
  you should first use an <<allocation-filtering,allocation filter>> to safely
  migrate all the shard data onto other nodes in the cluster (see the sketch
  after this list).

* If you want to repurpose a node to have both `node.master: false` and
  `node.data: false` then it is simplest to start a brand-new node with an
  empty data path and the desired roles. You may find it safest to use an
  <<allocation-filtering,allocation filter>> to migrate the shard data
  elsewhere in the cluster first.
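
A minimal sketch of that allocation-filter step (the node name is
hypothetical):

[source,js]
----
PUT /_cluster/settings
{
  "transient": {
    "cluster.routing.allocation.exclude._name": "node-to-repurpose"
  }
}
----

Once the shards have drained off the node, it can be stopped and repurposed.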

If it is not possible to follow these extra steps then you may be able to use
the <<node-tool-repurpose,`elasticsearch-node repurpose`>> tool to delete any
excess data that prevents a node from starting.

[float]
== Node data path settings

@@ -1,8 +1,9 @@
[[query-dsl-ids-query]]
=== Ids Query
Returns documents based on their IDs. This query uses document IDs stored in
the <<mapping-id-field,`_id`>> field.

Filters documents that only have the provided ids. Note, this query
uses the <<mapping-id-field,_id>> field.
==== Example request

[source,js]
--------------------------------------------------

@@ -16,3 +17,11 @@ GET /_search
}
--------------------------------------------------
// CONSOLE

==== Top-level parameters for `ids`

[cols="v,v",options="header"]
|======
|Parameter |Description
|`values` |An array of <<mapping-id-field, document IDs>>.
|======

@@ -188,46 +188,48 @@ to the client. This means it includes the time spent waiting in thread pools,
executing a distributed search across the whole cluster and gathering all the
results.

include::request/query.asciidoc[]

include::request/from-size.asciidoc[]

include::request/sort.asciidoc[]

include::request/track-total-hits.asciidoc[]

include::request/source-filtering.asciidoc[]

include::request/stored-fields.asciidoc[]

include::request/script-fields.asciidoc[]

include::request/docvalue-fields.asciidoc[]

include::request/post-filter.asciidoc[]

include::request/highlighting.asciidoc[]

include::request/rescore.asciidoc[]

include::request/search-type.asciidoc[]

include::request/scroll.asciidoc[]

include::request/preference.asciidoc[]

include::request/explain.asciidoc[]

include::request/version-and-seq-no.asciidoc[]
include::request/collapse.asciidoc[]

include::request/from-size.asciidoc[]

include::request/highlighting.asciidoc[]

include::request/index-boost.asciidoc[]

include::request/inner-hits.asciidoc[]

include::request/min-score.asciidoc[]

include::request/named-queries-and-filters.asciidoc[]

include::request/inner-hits.asciidoc[]
include::request/post-filter.asciidoc[]

include::request/collapse.asciidoc[]
include::request/preference.asciidoc[]

include::request/query.asciidoc[]

include::request/rescore.asciidoc[]

include::request/script-fields.asciidoc[]

include::request/scroll.asciidoc[]

include::request/search-after.asciidoc[]

include::request/search-type.asciidoc[]

include::request/seq-no.asciidoc[]

include::request/sort.asciidoc[]

include::request/source-filtering.asciidoc[]

include::request/stored-fields.asciidoc[]

include::request/track-total-hits.asciidoc[]

include::request/version.asciidoc[]

@@ -15,20 +15,3 @@ GET /_search
}
--------------------------------------------------
// CONSOLE

[[search-request-version]]
=== Version

Returns a version for each search hit.

[source,js]
--------------------------------------------------
GET /_search
{
    "version": true,
    "query" : {
        "term" : { "user" : "kimchy" }
    }
}
--------------------------------------------------
// CONSOLE

@@ -1,5 +1,5 @@
[[search-request-stored-fields]]
=== Fields
=== Stored Fields

WARNING: The `stored_fields` parameter is about fields that are explicitly marked as
stored in the mapping, which is off by default and generally not recommended.