Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-09 14:34:43 +00:00)

Commit 96fc3aaf6f: Merge branch 'master' into feature/rank-eval

Changed files and directories:
.dir-locals.el
.github
CONTRIBUTING.md
README.textile
buildSrc
build.gradle
version.properties
src/main
gradle-2.13-groovy/org/elasticsearch/gradle
gradle-2.14-groovy/org/elasticsearch/gradle
groovy
com/carrotsearch/gradle/junit4
org/elasticsearch/gradle
resources
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation
core
licenses
src/main/java/org
apache/lucene/search/uhighlight
elasticsearch
action
admin/indices/validate/query
QueryExplanation.java
TransportValidateQueryAction.java
ValidateQueryRequest.java
ValidateQueryRequestBuilder.java
bulk/byscroll
search
AbstractSearchAsyncAction.java
SearchDfsQueryThenFetchAsyncAction.java
SearchQueryThenFetchAsyncAction.java
TransportSearchAction.java
support
bootstrap
cluster
common/settings
discovery/zen
index
monitor/fs
plugins
rest/action/admin/indices
search
aggregations
InternalAggregation.java
bucket
metrics
avg
cardinality
max
min
percentiles
sum
valuecount
pipeline
fetch/subphase/highlight
suggest/completion
snapshots
tasks
.dir-locals.el
@@ -83,6 +83,6 @@
))
(c-basic-offset . 4)
(c-comment-only-line-offset . (0 . 0))
(fill-column . 140)
(fci-rule-column . 140)
(fill-column . 100)
(fci-rule-column . 100)
(compile-command . "gradle compileTestJava"))))
.github/PULL_REQUEST_TEMPLATE.md (vendored, 1 line changed)
@@ -11,3 +11,4 @@ attention.
- If submitting code, have you built your formula locally prior to submission with `gradle check`?
- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
- If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)?
- If you are submitting this code for a class then read our [policy](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md#contributing-as-part-of-a-class) for that.
CONTRIBUTING.md
@@ -88,8 +88,8 @@ Contributing to the Elasticsearch codebase
**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)

Make sure you have [Gradle](http://gradle.org) installed, as
Elasticsearch uses it as its build system. Gradle must be version 2.13 _exactly_ in
order to build successfully.
Elasticsearch uses it as its build system. Gradle must be at least
version 3.3 in order to build successfully.

Eclipse users can automatically configure their IDE: `gradle eclipse`
then `File: Import: Existing Projects into Workspace`. Select the
@@ -139,3 +139,32 @@ Before submitting your changes, run the test suite to make sure that nothing is
```sh
gradle check
```

Contributing as part of a class
-------------------------------
In general Elasticsearch is happy to accept contributions that were created as
part of a class but strongly advise against making the contribution as part of
the class. So if you have code you wrote for a class feel free to submit it.

Please, please, please do not assign contributing to Elasticsearch as part of a
class. If you really want to assign writing code for Elasticsearch as an
assignment then the code contributions should be made to your private clone and
opening PRs against the primary Elasticsearch clone must be optional, fully
voluntary, not for a grade, and without any deadlines.

Because:

* While the code review process is likely very educational, it can take wildly
varying amounts of time depending on who is available, where the change is, and
how deep the change is. There is no way to predict how long it will take unless
we rush.
* We do not rush reviews without a very, very good reason. Class deadlines
aren't a good enough reason for us to rush reviews.
* We deeply discourage opening a PR you don't intend to work through the entire
code review process because it wastes our time.
* We don't have the capacity to absorb an entire class full of new contributors,
especially when they are unlikely to become long time contributors.

Finally, we require that you run `gradle check` before submitting a
non-documentation contribution. This is mentioned above, but it is worth
repeating in this section because it has come up in this context.
README.textile
@@ -200,7 +200,7 @@ We have just covered a very small portion of what Elasticsearch is all about. Fo

h3. Building from Source

Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have version 2.13 of Gradle installed.
Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have at least version 3.3 of Gradle installed.

In order to create a distribution, simply run the @gradle assemble@ command in the cloned directory.
buildSrc/build.gradle
@@ -23,8 +23,8 @@ apply plugin: 'groovy'

group = 'org.elasticsearch.gradle'

if (GradleVersion.current() < GradleVersion.version('2.13')) {
throw new GradleException('Gradle 2.13+ is required to build elasticsearch')
if (GradleVersion.current() < GradleVersion.version('3.3')) {
throw new GradleException('Gradle 3.3+ is required to build elasticsearch')
}

if (JavaVersion.current() < JavaVersion.VERSION_1_8) {
@@ -96,23 +96,12 @@ dependencies {
compile 'org.apache.rat:apache-rat:0.11'
}

// Gradle version-specific options (allows build to run with Gradle 2.13 as well as 2.14+/3.+)
if (GradleVersion.current() == GradleVersion.version("2.13")) {
// ProgressLogger(-Factory) classes are part of the public Gradle API
sourceSets.main.groovy.srcDir 'src/main/gradle-2.13-groovy'
// Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs
// Use logging dependency instead

dependencies {
compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1' // last version compatible with Gradle 2.13
}
} else {
// Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs
// Use logging dependency instead
sourceSets.main.groovy.srcDir 'src/main/gradle-2.14-groovy'

dependencies {
compileOnly "org.gradle:gradle-logging:${GradleVersion.current().getVersion()}"
compile 'ru.vyarus:gradle-animalsniffer-plugin:1.2.0' // Gradle 2.14 requires a version > 1.0.1
}
dependencies {
compileOnly "org.gradle:gradle-logging:${GradleVersion.current().getVersion()}"
compile 'ru.vyarus:gradle-animalsniffer-plugin:1.2.0' // Gradle 2.14 requires a version > 1.0.1
}

/*****************************************************************************
@ -1,31 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.gradle
|
||||
|
||||
/**
|
||||
* Wraps a ProgressLogger so that code in src/main/groovy does not need to
|
||||
* define imports on Gradle 2.13/2.14+ ProgressLoggers
|
||||
*/
|
||||
class ProgressLogger {
|
||||
@Delegate org.gradle.logging.ProgressLogger progressLogger
|
||||
|
||||
ProgressLogger(org.gradle.logging.ProgressLogger progressLogger) {
|
||||
this.progressLogger = progressLogger
|
||||
}
|
||||
}
|
35
buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy
35
buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy
@ -1,35 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.gradle
|
||||
|
||||
import org.gradle.logging.ProgressLoggerFactory
|
||||
|
||||
import javax.inject.Inject
|
||||
|
||||
/**
|
||||
* Allows to inject a ProgressLoggerFactory to tasks in src/main/groovy
|
||||
* without requiring the corresponding import of ProgressLoggerFactory,
|
||||
* making it compatible with both Gradle 2.13 and 2.14+.
|
||||
*/
|
||||
trait ProgressLoggerFactoryInjection {
|
||||
@Inject
|
||||
ProgressLoggerFactory getProgressLoggerFactory() {
|
||||
throw new UnsupportedOperationException()
|
||||
}
|
||||
}
|
@ -1,31 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.gradle
|
||||
|
||||
/**
|
||||
* Wraps a ProgressLogger so that code in src/main/groovy does not need to
|
||||
* define imports on Gradle 2.13/2.14+ ProgressLoggers
|
||||
*/
|
||||
class ProgressLogger {
|
||||
@Delegate org.gradle.internal.logging.progress.ProgressLogger progressLogger
|
||||
|
||||
ProgressLogger(org.gradle.internal.logging.progress.ProgressLogger progressLogger) {
|
||||
this.progressLogger = progressLogger
|
||||
}
|
||||
}
|
35
buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy
35
buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy
@ -1,35 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.gradle
|
||||
|
||||
import org.gradle.internal.logging.progress.ProgressLoggerFactory
|
||||
|
||||
import javax.inject.Inject
|
||||
|
||||
/**
|
||||
* Allows to inject a ProgressLoggerFactory to tasks in src/main/groovy
|
||||
* without requiring the corresponding import of ProgressLoggerFactory,
|
||||
* making it compatible with both Gradle 2.13 and 2.14+.
|
||||
*/
|
||||
trait ProgressLoggerFactoryInjection {
|
||||
@Inject
|
||||
ProgressLoggerFactory getProgressLoggerFactory() {
|
||||
throw new UnsupportedOperationException()
|
||||
}
|
||||
}
|
@ -8,7 +8,6 @@ import org.apache.tools.ant.BuildException
|
||||
import org.apache.tools.ant.DefaultLogger
|
||||
import org.apache.tools.ant.RuntimeConfigurable
|
||||
import org.apache.tools.ant.UnknownElement
|
||||
import org.elasticsearch.gradle.ProgressLoggerFactoryInjection
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.file.FileTreeElement
|
||||
@ -20,9 +19,12 @@ import org.gradle.api.tasks.Optional
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.api.tasks.util.PatternFilterable
|
||||
import org.gradle.api.tasks.util.PatternSet
|
||||
import org.gradle.internal.logging.progress.ProgressLoggerFactory
|
||||
import org.gradle.util.ConfigureUtil
|
||||
|
||||
class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactoryInjection {
|
||||
import javax.inject.Inject
|
||||
|
||||
class RandomizedTestingTask extends DefaultTask {
|
||||
|
||||
// TODO: change to "executable" to match gradle test params?
|
||||
@Optional
|
||||
@ -92,6 +94,11 @@ class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactory
|
||||
listenersConfig.listeners.add(new TestReportLogger(logger: logger, config: testLoggingConfig))
|
||||
}
|
||||
|
||||
@Inject
|
||||
ProgressLoggerFactory getProgressLoggerFactory() {
|
||||
throw new UnsupportedOperationException()
|
||||
}
|
||||
|
||||
void jvmArgs(Iterable<String> arguments) {
|
||||
jvmArgs.addAll(arguments)
|
||||
}
|
||||
|
@ -25,7 +25,8 @@ import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent
|
||||
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent
|
||||
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent
|
||||
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
|
||||
import org.elasticsearch.gradle.ProgressLogger
|
||||
import org.gradle.internal.logging.progress.ProgressLogger
|
||||
import org.gradle.internal.logging.progress.ProgressLoggerFactory
|
||||
|
||||
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
|
||||
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR
|
||||
@ -51,6 +52,8 @@ import static java.lang.Math.max
|
||||
* quick.
|
||||
*/
|
||||
class TestProgressLogger implements AggregatedEventListener {
|
||||
/** Factory to build a progress logger when testing starts */
|
||||
ProgressLoggerFactory factory
|
||||
ProgressLogger progressLogger
|
||||
int totalSuites
|
||||
int totalSlaves
|
||||
@ -74,17 +77,14 @@ class TestProgressLogger implements AggregatedEventListener {
|
||||
/** Have we finished a whole suite yet? */
|
||||
volatile boolean suiteFinished = false
|
||||
/* Note that we probably overuse volatile here but it isn't hurting us and
|
||||
lets us move things around without worying about breaking things. */
|
||||
|
||||
TestProgressLogger(Map args) {
|
||||
progressLogger = new ProgressLogger(args.factory.newOperation(TestProgressLogger))
|
||||
progressLogger.setDescription('Randomized test runner')
|
||||
}
|
||||
lets us move things around without worrying about breaking things. */
|
||||
|
||||
@Subscribe
|
||||
void onStart(AggregatedStartEvent e) throws IOException {
|
||||
totalSuites = e.suiteCount
|
||||
totalSlaves = e.slaveCount
|
||||
progressLogger = factory.newOperation(TestProgressLogger)
|
||||
progressLogger.setDescription('Randomized test runner')
|
||||
progressLogger.started()
|
||||
progressLogger.progress(
|
||||
"Starting JUnit4 for ${totalSuites} suites on ${totalSlaves} jvms")
|
||||
|
@@ -122,7 +122,7 @@ class BuildPlugin implements Plugin<Project> {
}

// enforce gradle version
GradleVersion minGradle = GradleVersion.version('2.13')
GradleVersion minGradle = GradleVersion.version('3.3')
if (GradleVersion.current() < minGradle) {
throw new GradleException("${minGradle} or above is required to build elasticsearch")
}
@@ -209,9 +209,11 @@ public class ThirdPartyAuditTask extends AntTask {
try {
ant.thirdPartyAudit(failOnUnsupportedJava: false,
failOnMissingClasses: false,
signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
classpath: classpath.asPath) {
fileset(dir: tmpDir)
signatures {
string(value: getClass().getResourceAsStream('/forbidden/third-party-audit.txt').getText('UTF-8'))
}
}
} catch (BuildException ignore) {}
@@ -125,6 +125,8 @@ class ClusterConfiguration {

Map<String, Object> settings = new HashMap<>()

Map<String, String> keystoreSettings = new HashMap<>()

// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()

@@ -144,6 +146,11 @@ class ClusterConfiguration {
settings.put(name, value)
}

@Input
void keystoreSetting(String name, String value) {
keystoreSettings.put(name, value)
}

@Input
void plugin(String path) {
Project pluginProject = project.project(path)
@ -38,6 +38,7 @@ import org.gradle.api.tasks.Copy
|
||||
import org.gradle.api.tasks.Delete
|
||||
import org.gradle.api.tasks.Exec
|
||||
|
||||
import java.nio.charset.StandardCharsets
|
||||
import java.nio.file.Paths
|
||||
import java.util.concurrent.TimeUnit
|
||||
|
||||
@ -157,10 +158,14 @@ class ClusterFormationTasks {
|
||||
node.cwd.mkdirs()
|
||||
}
|
||||
}
|
||||
|
||||
setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
|
||||
setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
|
||||
setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration)
|
||||
setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
|
||||
setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
|
||||
setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
|
||||
|
||||
if (node.config.plugins.isEmpty() == false) {
|
||||
if (node.nodeVersion == VersionProperties.elasticsearch) {
|
||||
setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node)
|
||||
@ -303,6 +308,33 @@ class ClusterFormationTasks {
|
||||
}
|
||||
}
|
||||
|
||||
/** Adds a task to create keystore */
|
||||
static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
if (node.config.keystoreSettings.isEmpty()) {
|
||||
return setup
|
||||
} else {
|
||||
File esKeystoreUtil = Paths.get(node.homeDir.toString(), "bin/" + "elasticsearch-keystore").toFile()
|
||||
return configureExecTask(name, project, setup, node, esKeystoreUtil, 'create')
|
||||
}
|
||||
}
|
||||
|
||||
/** Adds tasks to add settings to the keystore */
|
||||
static Task configureAddKeystoreSettingTasks(String parent, Project project, Task setup, NodeInfo node) {
|
||||
Map kvs = node.config.keystoreSettings
|
||||
File esKeystoreUtil = Paths.get(node.homeDir.toString(), "bin/" + "elasticsearch-keystore").toFile()
|
||||
Task parentTask = setup
|
||||
for (Map.Entry<String, String> entry in kvs) {
|
||||
String key = entry.getKey()
|
||||
String name = taskName(parent, node, 'addToKeystore#' + key)
|
||||
Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add', key, '-x')
|
||||
t.doFirst {
|
||||
standardInput = new ByteArrayInputStream(entry.getValue().getBytes(StandardCharsets.UTF_8))
|
||||
}
|
||||
parentTask = t
|
||||
}
|
||||
return parentTask
|
||||
}
|
||||
|
||||
static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
if (node.config.extraConfigFiles.isEmpty()) {
|
||||
return setup
|
||||
|
@ -22,10 +22,15 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask
|
||||
import org.elasticsearch.gradle.BuildPlugin
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.execution.TaskExecutionAdapter
|
||||
import org.gradle.api.internal.tasks.options.Option
|
||||
import org.gradle.api.plugins.JavaBasePlugin
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.util.ConfigureUtil
|
||||
import org.gradle.api.tasks.TaskState
|
||||
|
||||
import java.nio.charset.StandardCharsets
|
||||
import java.nio.file.Files
|
||||
import java.util.stream.Stream
|
||||
|
||||
/**
|
||||
* A wrapper task around setting up a cluster and running rest tests.
|
||||
@ -71,6 +76,24 @@ public class RestIntegTestTask extends DefaultTask {
|
||||
// both as separate sysprops
|
||||
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
|
||||
|
||||
// dump errors and warnings from cluster log on failure
|
||||
TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
|
||||
@Override
|
||||
void afterExecute(Task task, TaskState state) {
|
||||
if (state.failure != null) {
|
||||
for (NodeInfo nodeInfo : nodes) {
|
||||
printLogExcerpt(nodeInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
runner.doFirst {
|
||||
project.gradle.addListener(logDumpListener)
|
||||
}
|
||||
runner.doLast {
|
||||
project.gradle.removeListener(logDumpListener)
|
||||
}
|
||||
|
||||
// copy the rest spec/tests into the test resources
|
||||
RestSpecHack.configureDependencies(project)
|
||||
project.afterEvaluate {
|
||||
@ -126,4 +149,42 @@ public class RestIntegTestTask extends DefaultTask {
|
||||
public Task mustRunAfter(Object... tasks) {
|
||||
clusterInit.mustRunAfter(tasks)
|
||||
}
|
||||
|
||||
/** Print out an excerpt of the log from the given node. */
|
||||
protected static void printLogExcerpt(NodeInfo nodeInfo) {
|
||||
File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log")
|
||||
println("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:")
|
||||
println("(full log at ${logFile})")
|
||||
println('-----------------------------------------')
|
||||
Stream<String> stream = Files.lines(logFile.toPath(), StandardCharsets.UTF_8)
|
||||
try {
|
||||
boolean inStartup = true
|
||||
boolean inExcerpt = false
|
||||
int linesSkipped = 0
|
||||
for (String line : stream) {
|
||||
if (line.startsWith("[")) {
|
||||
inExcerpt = false // clear with the next log message
|
||||
}
|
||||
if (line =~ /(\[WARN\])|(\[ERROR\])/) {
|
||||
inExcerpt = true // show warnings and errors
|
||||
}
|
||||
if (inStartup || inExcerpt) {
|
||||
if (linesSkipped != 0) {
|
||||
println("... SKIPPED ${linesSkipped} LINES ...")
|
||||
}
|
||||
println(line)
|
||||
linesSkipped = 0
|
||||
} else {
|
||||
++linesSkipped
|
||||
}
|
||||
if (line =~ /recovered \[\d+\] indices into cluster_state/) {
|
||||
inStartup = false
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
stream.close()
|
||||
}
|
||||
println('=========================================')
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -19,10 +19,9 @@
|
||||
package org.elasticsearch.gradle.vagrant
|
||||
|
||||
import com.carrotsearch.gradle.junit4.LoggingOutputStream
|
||||
import groovy.transform.PackageScope
|
||||
import org.elasticsearch.gradle.ProgressLogger
|
||||
import org.gradle.api.GradleScriptException
|
||||
import org.gradle.api.logging.Logger
|
||||
import org.gradle.internal.logging.progress.ProgressLogger
|
||||
|
||||
import java.util.regex.Matcher
|
||||
|
||||
@ -48,7 +47,7 @@ public class TapLoggerOutputStream extends LoggingOutputStream {
|
||||
|
||||
TapLoggerOutputStream(Map args) {
|
||||
logger = args.logger
|
||||
progressLogger = new ProgressLogger(args.factory.newOperation(VagrantLoggerOutputStream))
|
||||
progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
|
||||
progressLogger.setDescription("TAP output for `${args.command}`")
|
||||
}
|
||||
|
||||
|
@ -19,15 +19,17 @@
|
||||
package org.elasticsearch.gradle.vagrant
|
||||
|
||||
import org.apache.commons.io.output.TeeOutputStream
|
||||
import org.elasticsearch.gradle.ProgressLoggerFactoryInjection
|
||||
import org.elasticsearch.gradle.LoggedExec
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.internal.logging.progress.ProgressLoggerFactory
|
||||
|
||||
import javax.inject.Inject
|
||||
|
||||
/**
|
||||
* Runs a vagrant command. Pretty much like Exec task but with a nicer output
|
||||
* formatter and defaults to `vagrant` as first part of commandLine.
|
||||
*/
|
||||
public class VagrantCommandTask extends LoggedExec implements ProgressLoggerFactoryInjection {
|
||||
public class VagrantCommandTask extends LoggedExec {
|
||||
|
||||
@Input
|
||||
String boxName
|
||||
@ -47,6 +49,11 @@ public class VagrantCommandTask extends LoggedExec implements ProgressLoggerFact
|
||||
}
|
||||
}
|
||||
|
||||
@Inject
|
||||
ProgressLoggerFactory getProgressLoggerFactory() {
|
||||
throw new UnsupportedOperationException()
|
||||
}
|
||||
|
||||
protected OutputStream createLoggerOutputStream() {
|
||||
return new VagrantLoggerOutputStream(
|
||||
command: commandLine.join(' '),
|
||||
|
@ -19,7 +19,7 @@
|
||||
package org.elasticsearch.gradle.vagrant
|
||||
|
||||
import com.carrotsearch.gradle.junit4.LoggingOutputStream
|
||||
import org.elasticsearch.gradle.ProgressLogger
|
||||
import org.gradle.internal.logging.progress.ProgressLogger
|
||||
|
||||
/**
|
||||
* Adapts an OutputStream being written to by vagrant into a ProcessLogger. It
|
||||
@ -53,7 +53,7 @@ public class VagrantLoggerOutputStream extends LoggingOutputStream {
|
||||
private String heading = ''
|
||||
|
||||
VagrantLoggerOutputStream(Map args) {
|
||||
progressLogger = new ProgressLogger(args.factory.newOperation(VagrantLoggerOutputStream))
|
||||
progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
|
||||
progressLogger.setDescription("Vagrant output for `$args.command`")
|
||||
squashedPrefix = args.squashedPrefix
|
||||
}
|
||||
|
buildSrc/src/main/resources/checkstyle.xml
@@ -22,7 +22,7 @@
suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
unfair. -->
<module name="LineLength">
<property name="max" value="140"/>
<property name="max" value="100"/>
</module>

<module name="AvoidStarImport" />

(File diff suppressed because it is too large)
@@ -1,6 +1,5 @@
eclipse.preferences.version=1

# previous configuration from maven build
# this is merged with gradle's generated properties during 'gradle eclipse'

# NOTE: null pointer analysis etc is not enabled currently, it seems very unstable
@@ -17,6 +16,6 @@ eclipse.preferences.version=1
# org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning

org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.formatter.lineSplit=140
org.eclipse.jdt.core.formatter.lineSplit=100
org.eclipse.jdt.core.formatter.tabulation.char=space
org.eclipse.jdt.core.formatter.tabulation.size=4
buildSrc/version.properties
@@ -10,7 +10,7 @@ snakeyaml = 1.15
# When updating log4j, please update also docs/java-api/index.asciidoc
log4j = 2.7
slf4j = 1.6.2
jna = 4.2.2
jna = 4.4.0

# test dependencies
randomizedrunner = 2.5.0
@ -56,35 +56,35 @@ public class DeleteDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
public void testDelete() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// tag::delete-request[]
|
||||
// tag::delete-request
|
||||
DeleteRequest request = new DeleteRequest(
|
||||
"index", // <1>
|
||||
"type", // <2>
|
||||
"id"); // <3>
|
||||
// end::delete-request[]
|
||||
// end::delete-request
|
||||
|
||||
// tag::delete-request-props[]
|
||||
// tag::delete-request-props
|
||||
request.timeout(TimeValue.timeValueSeconds(1)); // <1>
|
||||
request.timeout("1s"); // <2>
|
||||
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <3>
|
||||
request.setRefreshPolicy("wait_for"); // <4>
|
||||
request.version(2); // <5>
|
||||
request.versionType(VersionType.EXTERNAL); // <6>
|
||||
// end::delete-request-props[]
|
||||
// end::delete-request-props
|
||||
|
||||
// tag::delete-execute[]
|
||||
// tag::delete-execute
|
||||
DeleteResponse response = client.delete(request);
|
||||
// end::delete-execute[]
|
||||
// end::delete-execute
|
||||
|
||||
try {
|
||||
// tag::delete-notfound[]
|
||||
// tag::delete-notfound
|
||||
if (response.getResult().equals(DocWriteResponse.Result.NOT_FOUND)) {
|
||||
throw new Exception("Can't find document to be removed"); // <1>
|
||||
}
|
||||
// end::delete-notfound[]
|
||||
// end::delete-notfound
|
||||
} catch (Exception ignored) { }
|
||||
|
||||
// tag::delete-execute-async[]
|
||||
// tag::delete-execute-async
|
||||
client.deleteAsync(request, new ActionListener<DeleteResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeleteResponse deleteResponse) {
|
||||
@ -96,9 +96,9 @@ public class DeleteDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
// <2>
|
||||
}
|
||||
});
|
||||
// end::delete-execute-async[]
|
||||
// end::delete-execute-async
|
||||
|
||||
// tag::delete-conflict[]
|
||||
// tag::delete-conflict
|
||||
try {
|
||||
client.delete(request);
|
||||
} catch (ElasticsearchException exception) {
|
||||
@ -106,7 +106,7 @@ public class DeleteDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
// <1>
|
||||
}
|
||||
}
|
||||
// end::delete-conflict[]
|
||||
// end::delete-conflict
|
||||
|
||||
}
|
||||
}
|
||||
|
@@ -1 +0,0 @@
5012450aee579c3118ff09461d5ce210e0cdc2a9

core/licenses/jna-4.4.0.jar.sha1 (new file, 1 line)
@@ -0,0 +1 @@
cb208278274bf12ebdb56c61bd7407e6f774d65a

core/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java (new file, 171 lines)
@ -0,0 +1,171 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.lucene.search.uhighlight;
|
||||
|
||||
import java.text.BreakIterator;
|
||||
import java.text.CharacterIterator;
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* A custom break iterator that scans text to find break-delimited passages bounded by
|
||||
* a provided maximum length. This class delegates the boundary search to a first level
|
||||
* break iterator. When this break iterator finds a passage greater than the maximum length
|
||||
* a secondary break iterator is used to re-split the passage at the first boundary after
|
||||
* maximum length.
|
||||
* This is useful to split passages created by {@link BreakIterator}s like `sentence` that
|
||||
* can create big outliers on semi-structured text.
|
||||
*
|
||||
* WARNING: This break iterator is designed to work with the {@link UnifiedHighlighter}.
|
||||
**/
|
||||
public class BoundedBreakIteratorScanner extends BreakIterator {
|
||||
private final BreakIterator mainBreak;
|
||||
private final BreakIterator innerBreak;
|
||||
private final int maxLen;
|
||||
|
||||
private int lastPrecedingOffset = -1;
|
||||
private int windowStart = -1;
|
||||
private int windowEnd = -1;
|
||||
private int innerStart = -1;
|
||||
private int innerEnd = 0;
|
||||
|
||||
private BoundedBreakIteratorScanner(BreakIterator mainBreak,
|
||||
BreakIterator innerBreak,
|
||||
int maxLen) {
|
||||
this.mainBreak = mainBreak;
|
||||
this.innerBreak = innerBreak;
|
||||
this.maxLen = maxLen;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CharacterIterator getText() {
|
||||
return mainBreak.getText();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setText(CharacterIterator newText) {
|
||||
reset();
|
||||
mainBreak.setText(newText);
|
||||
innerBreak.setText(newText);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setText(String newText) {
|
||||
reset();
|
||||
mainBreak.setText(newText);
|
||||
innerBreak.setText(newText);
|
||||
}
|
||||
|
||||
private void reset() {
|
||||
lastPrecedingOffset = -1;
|
||||
windowStart = -1;
|
||||
windowEnd = -1;
|
||||
innerStart = -1;
|
||||
innerEnd = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Must be called with increasing offset. See {@link FieldHighlighter} for usage.
|
||||
*/
|
||||
@Override
|
||||
public int preceding(int offset) {
|
||||
if (offset < lastPrecedingOffset) {
|
||||
throw new IllegalArgumentException("offset < lastPrecedingOffset: " +
|
||||
"usage doesn't look like UnifiedHighlighter");
|
||||
}
|
||||
if (offset > windowStart && offset < windowEnd) {
|
||||
innerStart = innerEnd;
|
||||
innerEnd = windowEnd;
|
||||
} else {
|
||||
windowStart = innerStart = mainBreak.preceding(offset);
|
||||
windowEnd = innerEnd = mainBreak.following(offset-1);
|
||||
}
|
||||
|
||||
if (innerEnd - innerStart > maxLen) {
|
||||
// the current split is too big,
|
||||
// so starting from the current term we try to find boundaries on the left first
|
||||
if (offset - maxLen > innerStart) {
|
||||
innerStart = Math.max(innerStart,
|
||||
innerBreak.preceding(offset - maxLen));
|
||||
}
|
||||
// and then we try to expand the passage to the right with the remaining size
|
||||
int remaining = Math.max(0, maxLen - (offset - innerStart));
|
||||
if (offset + remaining < windowEnd) {
|
||||
innerEnd = Math.min(windowEnd,
|
||||
innerBreak.following(offset + remaining));
|
||||
}
|
||||
}
|
||||
lastPrecedingOffset = offset - 1;
|
||||
return innerStart;
|
||||
}
|
||||
|
||||
/**
|
||||
* Can be invoked only after a call to preceding(offset+1).
|
||||
* See {@link FieldHighlighter} for usage.
|
||||
*/
|
||||
@Override
|
||||
public int following(int offset) {
|
||||
if (offset != lastPrecedingOffset || innerEnd == -1) {
|
||||
throw new IllegalArgumentException("offset != lastPrecedingOffset: " +
|
||||
"usage doesn't look like UnifiedHighlighter");
|
||||
}
|
||||
return innerEnd;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@link BreakIterator#getSentenceInstance(Locale)} bounded to maxLen.
|
||||
* Secondary boundaries are found using a {@link BreakIterator#getWordInstance(Locale)}.
|
||||
*/
|
||||
public static BreakIterator getSentence(Locale locale, int maxLen) {
|
||||
final BreakIterator sBreak = BreakIterator.getSentenceInstance(locale);
|
||||
final BreakIterator wBreak = BreakIterator.getWordInstance(locale);
|
||||
return new BoundedBreakIteratorScanner(sBreak, wBreak, maxLen);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int current() {
|
||||
// Returns the last offset of the current split
|
||||
return this.innerEnd;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int first() {
|
||||
throw new IllegalStateException("first() should not be called in this context");
|
||||
}
|
||||
|
||||
@Override
|
||||
public int next() {
|
||||
throw new IllegalStateException("next() should not be called in this context");
|
||||
}
|
||||
|
||||
@Override
|
||||
public int last() {
|
||||
throw new IllegalStateException("last() should not be called in this context");
|
||||
}
|
||||
|
||||
@Override
|
||||
public int next(int n) {
|
||||
throw new IllegalStateException("next(n) should not be called in this context");
|
||||
}
|
||||
|
||||
@Override
|
||||
public int previous() {
|
||||
throw new IllegalStateException("previous() should not be called in this context");
|
||||
}
|
||||
}
|
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.search.uhighlight;
|
||||
|
||||
import java.text.BreakIterator;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
|
||||
|
||||
/**
|
||||
* Custom {@link FieldHighlighter} that creates a single passage bounded to {@code noMatchSize} when
|
||||
* no highlights were found.
|
||||
*/
|
||||
class CustomFieldHighlighter extends FieldHighlighter {
|
||||
private static final Passage[] EMPTY_PASSAGE = new Passage[0];
|
||||
|
||||
private final Locale breakIteratorLocale;
|
||||
private final int noMatchSize;
|
||||
private final String fieldValue;
|
||||
|
||||
CustomFieldHighlighter(String field, FieldOffsetStrategy fieldOffsetStrategy,
|
||||
Locale breakIteratorLocale, BreakIterator breakIterator,
|
||||
PassageScorer passageScorer, int maxPassages, int maxNoHighlightPassages,
|
||||
PassageFormatter passageFormatter, int noMatchSize, String fieldValue) {
|
||||
super(field, fieldOffsetStrategy, breakIterator, passageScorer, maxPassages,
|
||||
maxNoHighlightPassages, passageFormatter);
|
||||
this.breakIteratorLocale = breakIteratorLocale;
|
||||
this.noMatchSize = noMatchSize;
|
||||
this.fieldValue = fieldValue;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Passage[] getSummaryPassagesNoHighlight(int maxPassages) {
|
||||
if (noMatchSize > 0) {
|
||||
int pos = 0;
|
||||
while (pos < fieldValue.length() && fieldValue.charAt(pos) == MULTIVAL_SEP_CHAR) {
|
||||
pos ++;
|
||||
}
|
||||
if (pos < fieldValue.length()) {
|
||||
int end = fieldValue.indexOf(MULTIVAL_SEP_CHAR, pos);
|
||||
if (end == -1) {
|
||||
end = fieldValue.length();
|
||||
}
|
||||
if (noMatchSize+pos < end) {
|
||||
BreakIterator bi = BreakIterator.getWordInstance(breakIteratorLocale);
|
||||
bi.setText(fieldValue);
|
||||
// Finds the next word boundary **after** noMatchSize.
|
||||
end = bi.following(noMatchSize + pos);
|
||||
if (end == BreakIterator.DONE) {
|
||||
end = fieldValue.length();
|
||||
}
|
||||
}
|
||||
Passage passage = new Passage();
|
||||
passage.setScore(Float.NaN);
|
||||
passage.setStartOffset(pos);
|
||||
passage.setEndOffset(end);
|
||||
return new Passage[]{passage};
|
||||
}
|
||||
}
|
||||
return EMPTY_PASSAGE;
|
||||
}
|
||||
}
|
@ -33,6 +33,8 @@ import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanQuery;
|
||||
import org.apache.lucene.search.spans.SpanTermQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.lucene.all.AllTermQuery;
|
||||
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
|
||||
@ -47,6 +49,7 @@ import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* Subclass of the {@link UnifiedHighlighter} that works for a single field in a single document.
|
||||
@ -57,37 +60,41 @@ import java.util.Map;
|
||||
* Supports both returning empty snippets and non highlighted snippets when no highlighting can be performed.
|
||||
*/
|
||||
public class CustomUnifiedHighlighter extends UnifiedHighlighter {
|
||||
public static final char MULTIVAL_SEP_CHAR = (char) 0;
|
||||
private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
|
||||
|
||||
private final String fieldValue;
|
||||
private final PassageFormatter passageFormatter;
|
||||
private final BreakIterator breakIterator;
|
||||
private final boolean returnNonHighlightedSnippets;
|
||||
private final Locale breakIteratorLocale;
|
||||
private final int noMatchSize;
|
||||
|
||||
/**
|
||||
* Creates a new instance of {@link CustomUnifiedHighlighter}
|
||||
*
|
||||
* @param analyzer the analyzer used for the field at index time, used for multi term queries internally
|
||||
* @param passageFormatter our own {@link CustomPassageFormatter}
|
||||
* which generates snippets in forms of {@link Snippet} objects
|
||||
* which generates snippets in forms of {@link Snippet} objects
|
||||
* @param breakIteratorLocale the {@link Locale} to use for dividing text into passages.
|
||||
* If null {@link Locale#ROOT} is used
|
||||
* @param breakIterator the {@link BreakIterator} to use for dividing text into passages.
|
||||
* If null {@link BreakIterator#getSentenceInstance(Locale)} is used.
|
||||
* @param fieldValue the original field values as constructor argument, loaded from the _source field or
|
||||
* the relevant stored field.
|
||||
* @param returnNonHighlightedSnippets whether non highlighted snippets should be
|
||||
* returned rather than empty snippets when no highlighting can be performed
|
||||
* If null {@link BreakIterator#getSentenceInstance(Locale)} is used.
|
||||
* @param fieldValue the original field values delimited by MULTIVAL_SEP_CHAR
|
||||
* @param noMatchSize The size of the text that should be returned when no highlighting can be performed
|
||||
*/
|
||||
public CustomUnifiedHighlighter(IndexSearcher searcher,
|
||||
Analyzer analyzer,
|
||||
PassageFormatter passageFormatter,
|
||||
@Nullable Locale breakIteratorLocale,
|
||||
@Nullable BreakIterator breakIterator,
|
||||
String fieldValue,
|
||||
boolean returnNonHighlightedSnippets) {
|
||||
int noMatchSize) {
|
||||
super(searcher, analyzer);
|
||||
this.breakIterator = breakIterator;
|
||||
this.breakIteratorLocale = breakIteratorLocale == null ? Locale.ROOT : breakIteratorLocale;
|
||||
this.passageFormatter = passageFormatter;
|
||||
this.fieldValue = fieldValue;
|
||||
this.returnNonHighlightedSnippets = returnNonHighlightedSnippets;
|
||||
this.noMatchSize = noMatchSize;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -111,16 +118,13 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
|
||||
@Override
|
||||
protected List<CharSequence[]> loadFieldValues(String[] fields, DocIdSetIterator docIter,
|
||||
int cacheCharsThreshold) throws IOException {
|
||||
//we only highlight one field, one document at a time
|
||||
// we only highlight one field, one document at a time
|
||||
return Collections.singletonList(new String[]{fieldValue});
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BreakIterator getBreakIterator(String field) {
|
||||
if (breakIterator != null) {
|
||||
return breakIterator;
|
||||
}
|
||||
return super.getBreakIterator(field);
|
||||
return breakIterator;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -129,11 +133,18 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getMaxNoHighlightPassages(String field) {
|
||||
if (returnNonHighlightedSnippets) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
protected FieldHighlighter getFieldHighlighter(String field, Query query, Set<Term> allTerms, int maxPassages) {
|
||||
BytesRef[] terms = filterExtractedTerms(getFieldMatcher(field), allTerms);
|
||||
Set<HighlightFlag> highlightFlags = getFlags(field);
|
||||
PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags);
|
||||
CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags);
|
||||
OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata);
|
||||
BreakIterator breakIterator = new SplittingBreakIterator(getBreakIterator(field),
|
||||
UnifiedHighlighter.MULTIVAL_SEP_CHAR);
|
||||
FieldOffsetStrategy strategy =
|
||||
getOffsetStrategy(offsetSource, field, terms, phraseHelper, automata, highlightFlags);
|
||||
return new CustomFieldHighlighter(field, strategy, breakIteratorLocale, breakIterator,
|
||||
getScorer(field), maxPassages, (noMatchSize > 0 ? 1 : 0), getFormatter(field), noMatchSize, fieldValue);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -146,7 +157,6 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
|
||||
return rewriteCustomQuery(query);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Translate custom queries in queries that are supported by the unified highlighter.
|
||||
*/
|
||||
|
core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.validate.query;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -27,20 +28,26 @@ import java.io.IOException;

public class QueryExplanation implements Streamable {

public static final int RANDOM_SHARD = -1;

private String index;

private int shard = RANDOM_SHARD;

private boolean valid;

private String explanation;

private String error;

QueryExplanation() {

}

public QueryExplanation(String index, boolean valid, String explanation, String error) {

public QueryExplanation(String index, int shard, boolean valid, String explanation,
String error) {
this.index = index;
this.shard = shard;
this.valid = valid;
this.explanation = explanation;
this.error = error;
@@ -50,6 +57,10 @@ public class QueryExplanation implements Streamable {
return this.index;
}

public int getShard() {
return this.shard;
}

public boolean isValid() {
return this.valid;
}
@@ -65,6 +76,11 @@ public class QueryExplanation implements Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
index = in.readString();
if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
shard = in.readInt();
} else {
shard = RANDOM_SHARD;
}
valid = in.readBoolean();
explanation = in.readOptionalString();
error = in.readOptionalString();
@@ -73,6 +89,9 @@ public class QueryExplanation implements Streamable {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
out.writeInt(shard);
}
out.writeBoolean(valid);
out.writeOptionalString(explanation);
out.writeOptionalString(error);
core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
@@ -89,8 +89,14 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid

@Override
protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) {
// Hard-code routing to limit request to a single shard, but still, randomize it...
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(Randomness.get().nextInt(1000)), request.indices());
final String routing;
if (request.allShards()) {
routing = null;
} else {
// Random routing to limit request to a single shard
routing = Integer.toString(Randomness.get().nextInt(1000));
}
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, routing, request.indices());
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, "_local");
}

@@ -124,12 +130,13 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
} else {
ShardValidateQueryResponse validateQueryResponse = (ShardValidateQueryResponse) shardResponse;
valid = valid && validateQueryResponse.isValid();
if (request.explain() || request.rewrite()) {
if (request.explain() || request.rewrite() || request.allShards()) {
if (queryExplanations == null) {
queryExplanations = new ArrayList<>();
}
queryExplanations.add(new QueryExplanation(
validateQueryResponse.getIndex(),
request.allShards() ? validateQueryResponse.getShardId().getId() : QueryExplanation.RANDOM_SHARD,
validateQueryResponse.isValid(),
validateQueryResponse.getExplanation(),
validateQueryResponse.getError()
24
core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
24
core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.action.admin.indices.validate.query;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.ValidateActions;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
@ -43,6 +44,7 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
|
||||
|
||||
private boolean explain;
|
||||
private boolean rewrite;
|
||||
private boolean allShards;
|
||||
|
||||
private String[] types = Strings.EMPTY_ARRAY;
|
||||
|
||||
@ -125,6 +127,20 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
|
||||
return rewrite;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether the query should be validated on all shards instead of one random shard
|
||||
*/
|
||||
public void allShards(boolean allShards) {
|
||||
this.allShards = allShards;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether the query should be validated on all shards instead of one random shard
|
||||
*/
|
||||
public boolean allShards() {
|
||||
return allShards;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
@ -138,6 +154,9 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
|
||||
}
|
||||
explain = in.readBoolean();
|
||||
rewrite = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
|
||||
allShards = in.readBoolean();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -150,11 +169,14 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
|
||||
}
|
||||
out.writeBoolean(explain);
|
||||
out.writeBoolean(rewrite);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
|
||||
out.writeBoolean(allShards);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", query[" + query + "], explain:" + explain +
|
||||
", rewrite:" + rewrite;
|
||||
", rewrite:" + rewrite + ", all_shards:" + allShards;
|
||||
}
|
||||
}
|
||||
|
core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
@@ -64,4 +64,12 @@ public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilde
request.rewrite(rewrite);
return this;
}

/**
* Indicates whether the query should be validated on all shards
*/
public ValidateQueryRequestBuilder setAllShards(boolean rewrite) {
request.allShards(rewrite);
return this;
}
}
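The request, transport, and builder changes above add an `all_shards` mode to the validate query API. Below is a hypothetical usage sketch, not part of this commit: it assumes the standard Java `Client` admin API, and the index name and query are purely illustrative.

```java
import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

class ValidateOnAllShardsExample {
    static void validateOnAllShards(Client client) {
        // "twitter" and the term query are illustrative values, not from the commit
        ValidateQueryRequest request = new ValidateQueryRequest("twitter");
        request.query(QueryBuilders.termQuery("user", "kimchy"));
        request.explain(true);    // ask for per-shard explanations
        request.allShards(true);  // new flag: validate on every shard instead of one random shard

        ValidateQueryResponse response = client.admin().indices().validateQuery(request).actionGet();
        for (QueryExplanation explanation : response.getQueryExplanation()) {
            // getShard() was added in this change; it is QueryExplanation.RANDOM_SHARD (-1)
            // when all_shards was not requested
            System.out.println(explanation.getIndex() + " shard " + explanation.getShard()
                    + " valid=" + explanation.isValid() + " explanation=" + explanation.getExplanation());
        }
    }
}
```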
@@ -465,14 +465,18 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
* @param searchFailures any search failures accumulated during the request
* @param timedOut have any of the sub-requests timed out?
*/
protected void finishHim(Exception failure, List<Failure> indexingFailures, List<SearchFailure> searchFailures, boolean timedOut) {
scrollSource.close();
if (failure == null) {
listener.onResponse(
buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures, timedOut));
} else {
listener.onFailure(failure);
}
protected void finishHim(Exception failure, List<Failure> indexingFailures,
List<SearchFailure> searchFailures, boolean timedOut) {
scrollSource.close(() -> {
if (failure == null) {
BulkByScrollResponse response = buildResponse(
timeValueNanos(System.nanoTime() - startTime.get()),
indexingFailures, searchFailures, timedOut);
listener.onResponse(response);
} else {
listener.onFailure(failure);
}
});
}

/**
@@ -113,8 +113,8 @@ public class ClientScrollableHitSource extends ScrollableHitSource {
}

@Override
protected void cleanup() {
// Nothing to do
protected void cleanup(Runnable onCompletion) {
onCompletion.run();
}

/**
@ -47,7 +47,7 @@ import static java.util.Objects.requireNonNull;
/**
* A scrollable source of results.
*/
public abstract class ScrollableHitSource implements Closeable {
public abstract class ScrollableHitSource {
private final AtomicReference<String> scrollId = new AtomicReference<>();

protected final Logger logger;
@ -82,25 +82,31 @@ public abstract class ScrollableHitSource implements Closeable {
}
protected abstract void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer<? super Response> onResponse);

@Override
public final void close() {
public final void close(Runnable onCompletion) {
String scrollId = this.scrollId.get();
if (Strings.hasLength(scrollId)) {
clearScroll(scrollId, this::cleanup);
clearScroll(scrollId, () -> cleanup(onCompletion));
} else {
cleanup();
cleanup(onCompletion);
}
}

/**
* Called to clear a scroll id.
*
* @param scrollId the id to clear
* @param onCompletion implementers must call this after completing the clear whether they are successful or not
* @param onCompletion implementers must call this after completing the clear whether they are
* successful or not
*/
protected abstract void clearScroll(String scrollId, Runnable onCompletion);
/**
* Called after the process has been totally finished to clean up any resources the process needed like remote connections.
* Called after the process has been totally finished to clean up any resources the process
* needed like remote connections.
*
* @param onCompletion implementers must call this after completing the cleanup whether they are
* successful or not
*/
protected abstract void cleanup();
protected abstract void cleanup(Runnable onCompletion);

/**
* Set the id of the last scroll. Used for debugging.
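The hunks above turn the synchronous close()/cleanup() pair into callback-style completion so each step can finish asynchronously. Below is a minimal self-contained sketch of that chaining pattern; the names (AsyncResource, clearRemoteState) are hypothetical and used only for illustration.

// Sketch only: chain completion callbacks so the caller-supplied onCompletion
// runs exactly once, after every cleanup step has had its turn.
abstract class AsyncResource {
    private volatile String handle;

    public final void close(Runnable onCompletion) {
        String h = handle;
        if (h != null) {
            // clear the remote handle first, then clean up local resources
            clearRemoteState(h, () -> cleanup(onCompletion));
        } else {
            cleanup(onCompletion);
        }
    }

    // implementers must invoke onCompletion whether or not the call succeeds
    protected abstract void clearRemoteState(String handle, Runnable onCompletion);

    protected abstract void cleanup(Runnable onCompletion);
}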
@ -43,6 +43,7 @@ import org.elasticsearch.transport.Transport;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
@ -67,17 +68,17 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>();
private final Object shardFailuresMutex = new Object();
private final AtomicInteger successfulOps = new AtomicInteger();
private final long startTime;
private final TransportSearchAction.SearchTimeProvider timeProvider;

protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService,
Function<String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider,
long clusterStateVersion, SearchTask task, SearchPhaseResults<Result> resultConsumer) {
super(name, request, shardsIts, logger);
this.startTime = startTime;
this.timeProvider = timeProvider;
this.logger = logger;
this.searchTransportService = searchTransportService;
this.executor = executor;
@ -94,10 +95,9 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
/**
* Builds how long it took to execute the search.
*/
private long buildTookInMillis() {
// protect ourselves against time going backwards
// negative values don't make sense and we want to be able to serialize that thing as a vLong
return Math.max(1, System.currentTimeMillis() - startTime);
long buildTookInMillis() {
return TimeUnit.NANOSECONDS.toMillis(
timeProvider.getRelativeCurrentNanos() - timeProvider.getRelativeStartNanos());
}

/**
@ -122,7 +122,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure
if (logger.isDebugEnabled()) {
final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
Throwable cause = shardSearchFailures.length == 0 ? null :
ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()),
cause);
}
@ -300,7 +301,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
return new ShardSearchTransportRequest(request, shardIt.shardId(), getNumShards(),
filter, indexBoost, startTime);
filter, indexBoost, timeProvider.getAbsoluteStartMillis());
}

/**
@ -33,28 +33,59 @@ import java.util.concurrent.Executor;
import java.util.function.Function;

final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

private final SearchPhaseController searchPhaseController;

SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size()));
SearchDfsQueryThenFetchAsyncAction(
final Logger logger,
final SearchTransportService searchTransportService,
final Function<String, Transport.Connection> nodeIdToConnection,
final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts,
final SearchPhaseController searchPhaseController,
final Executor executor,
final SearchRequest request,
final ActionListener<SearchResponse> listener,
final GroupShardsIterator shardsIts,
final TransportSearchAction.SearchTimeProvider timeProvider,
final long clusterStateVersion,
final SearchTask task) {
super(
"dfs",
logger,
searchTransportService,
nodeIdToConnection,
aliasFilter,
concreteIndexBoosts,
executor,
request,
listener,
shardsIts,
timeProvider,
clusterStateVersion,
task,
new SearchPhaseResults<>(shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}

@Override
protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) {
protected void executePhaseOnShard(
final ShardIterator shardIt,
final ShardRouting shard,
final ActionListener<DfsSearchResult> listener) {
getSearchTransport().sendExecuteDfs(getConnection(shard.currentNodeId()),
buildShardSearchRequest(shardIt, shard) , getTask(), listener);
}

@Override
protected SearchPhase getNextPhase(SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) {
return new DfsQueryPhase(results.results, searchPhaseController,
(queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context);
protected SearchPhase getNextPhase(
final SearchPhaseResults<DfsSearchResult> results, final SearchPhaseContext context) {
return new DfsQueryPhase(
results.results,
searchPhaseController,
(queryResults) ->
new FetchSearchPhase(queryResults, searchPhaseController, context),
context);
}

}
@ -32,30 +32,60 @@ import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.Function;

final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {
final class SearchQueryThenFetchAsyncAction
extends AbstractSearchAsyncAction<QuerySearchResultProvider> {

private final SearchPhaseController searchPhaseController;

SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task,
searchPhaseController.newSearchPhaseResults(request, shardsIts.size()));
SearchQueryThenFetchAsyncAction(
final Logger logger,
final SearchTransportService searchTransportService,
final Function<String, Transport.Connection> nodeIdToConnection,
final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts,
final SearchPhaseController searchPhaseController,
final Executor executor,
final SearchRequest request,
final ActionListener<SearchResponse> listener,
final GroupShardsIterator shardsIts,
final TransportSearchAction.SearchTimeProvider timeProvider,
long clusterStateVersion,
SearchTask task) {
super(
"query",
logger,
searchTransportService,
nodeIdToConnection,
aliasFilter,
concreteIndexBoosts,
executor,
request,
listener,
shardsIts,
timeProvider,
clusterStateVersion,
task,
searchPhaseController.newSearchPhaseResults(request, shardsIts.size()));
this.searchPhaseController = searchPhaseController;
}

protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) {
getSearchTransport().sendExecuteQuery(getConnection(shard.currentNodeId()),
buildShardSearchRequest(shardIt, shard), getTask(), listener);
protected void executePhaseOnShard(
final ShardIterator shardIt,
final ShardRouting shard,
final ActionListener<QuerySearchResultProvider> listener) {
getSearchTransport().sendExecuteQuery(
getConnection(shard.currentNodeId()),
buildShardSearchRequest(shardIt, shard),
getTask(),
listener);
}

@Override
protected SearchPhase getNextPhase(SearchPhaseResults<QuerySearchResultProvider> results, SearchPhaseContext context) {
protected SearchPhase getNextPhase(
final SearchPhaseResults<QuerySearchResultProvider> results,
final SearchPhaseContext context) {
return new FetchSearchPhase(results, searchPhaseController, context);
}

}
@ -52,6 +52,7 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.Function;
import java.util.function.LongSupplier;

import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;

@ -116,10 +117,62 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
return Collections.unmodifiableMap(concreteIndexBoosts);
}

/**
* Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving
* "now" to an index name). Another clock is needed for measuring how long a search operation
* took. These two uses are at odds with each other. There are many issues with using a real
* clock for measuring how long an operation took (they often lack precision, they are subject
* to moving backwards due to NTP and other such complexities, etc.). There are also issues with
* using a relative clock for reporting real time. Thus, we simply separate these two uses.
*/
static class SearchTimeProvider {

private final long absoluteStartMillis;
private final long relativeStartNanos;
private final LongSupplier relativeCurrentNanosProvider;

/**
* Instantiates a new search time provider. The absolute start time is the real clock time
* used for resolving index expressions that include dates. The relative start time is the
* start of the search operation according to a relative clock. The total time the search
* operation took can be measured against the provided relative clock and the relative start
* time.
*
* @param absoluteStartMillis the absolute start time in milliseconds since the epoch
* @param relativeStartNanos the relative start time in nanoseconds
* @param relativeCurrentNanosProvider provides the current relative time
*/
SearchTimeProvider(
final long absoluteStartMillis,
final long relativeStartNanos,
final LongSupplier relativeCurrentNanosProvider) {
this.absoluteStartMillis = absoluteStartMillis;
this.relativeStartNanos = relativeStartNanos;
this.relativeCurrentNanosProvider = relativeCurrentNanosProvider;
}

long getAbsoluteStartMillis() {
return absoluteStartMillis;
}

long getRelativeStartNanos() {
return relativeStartNanos;
}

long getRelativeCurrentNanos() {
return relativeCurrentNanosProvider.getAsLong();
}

}

@Override
protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// pure paranoia if time goes backwards we are at least positive
final long startTimeInMillis = Math.max(0, System.currentTimeMillis());
final long absoluteStartMillis = System.currentTimeMillis();
final long relativeStartNanos = System.nanoTime();
final SearchTimeProvider timeProvider =
new SearchTimeProvider(absoluteStartMillis, relativeStartNanos, System::nanoTime);

final String[] localIndices;
final Map<String, List<String>> remoteClusterIndices;
final ClusterState clusterState = clusterService.state();
@ -134,7 +187,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}

if (remoteClusterIndices.isEmpty()) {
executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices, Collections.emptyList(),
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(),
(nodeId) -> null, clusterState, Collections.emptyMap(), listener);
} else {
remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices,
@ -143,13 +196,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
Function<String, Transport.Connection> connectionFunction = remoteClusterService.processRemoteShards(
searchShardsResponses, remoteShardIterators, remoteAliasFilters);
executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices, remoteShardIterators,
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteShardIterators,
connectionFunction, clusterState, remoteAliasFilters, listener);
}, listener::onFailure));
}
}

private void executeSearch(SearchTask task, long startTimeInMillis, SearchRequest searchRequest, String[] localIndices,
private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, String[] localIndices,
List<ShardIterator> remoteShardIterators, Function<String, Transport.Connection> remoteConnections,
ClusterState clusterState, Map<String, AliasFilter> remoteAliasMap,
ActionListener<SearchResponse> listener) {
@ -163,7 +216,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified
} else {
indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),
startTimeInMillis, localIndices);
timeProvider.getAbsoluteStartMillis(), localIndices);
}
Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
@ -211,7 +264,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
return connection;
};

searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, connectionLookup, clusterState.version(),
searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(),
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
}

@ -236,7 +289,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}

private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest, GroupShardsIterator shardIterators,
long startTime, Function<String, Transport.Connection> connectionLookup,
SearchTimeProvider timeProvider, Function<String, Transport.Connection> connectionLookup,
long clusterStateVersion, Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,
ActionListener<SearchResponse> listener) {
@ -245,12 +298,12 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
switch(searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, timeProvider,
clusterStateVersion, task);
break;
case QUERY_THEN_FETCH:
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, timeProvider,
clusterStateVersion, task);
break;
default:
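The SearchTimeProvider introduced above separates the wall clock (needed to resolve date math in index names) from a monotonic clock (needed to report how long the search took). Here is a small runnable sketch of the same dual-clock idea, written only against the JDK; the class name TimeProviderSketch is hypothetical.

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

// Sketch only: hold a wall-clock start for "now" resolution and a monotonic
// start for duration measurement, in the spirit of the diff above.
final class TimeProviderSketch {
    private final long absoluteStartMillis;   // wall clock, e.g. System.currentTimeMillis()
    private final long relativeStartNanos;    // monotonic clock, e.g. System.nanoTime()
    private final LongSupplier relativeNanos; // supplies the current monotonic time

    TimeProviderSketch(long absoluteStartMillis, long relativeStartNanos, LongSupplier relativeNanos) {
        this.absoluteStartMillis = absoluteStartMillis;
        this.relativeStartNanos = relativeStartNanos;
        this.relativeNanos = relativeNanos;
    }

    long absoluteStartMillis() {
        return absoluteStartMillis; // safe to use for resolving date-math index names
    }

    long tookInMillis() {
        // monotonic difference; never jumps backwards the way wall-clock time can
        return TimeUnit.NANOSECONDS.toMillis(relativeNanos.getAsLong() - relativeStartNanos);
    }

    public static void main(String[] args) throws InterruptedException {
        TimeProviderSketch t = new TimeProviderSketch(
                System.currentTimeMillis(), System.nanoTime(), System::nanoTime);
        Thread.sleep(10);
        System.out.println("took ~" + t.tookInMillis() + "ms, started at " + t.absoluteStartMillis());
    }
}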
@ -38,6 +38,7 @@ public abstract class AdapterActionFuture<T, L> extends BaseFuture<T> implements
try {
return get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Future got interrupted", e);
} catch (ExecutionException e) {
throw rethrowExecutionException(e);
@ -66,6 +67,7 @@ public abstract class AdapterActionFuture<T, L> extends BaseFuture<T> implements
} catch (TimeoutException e) {
throw new ElasticsearchTimeoutException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Future got interrupted", e);
} catch (ExecutionException e) {
throw rethrowExecutionException(e);
@ -100,4 +102,5 @@ public abstract class AdapterActionFuture<T, L> extends BaseFuture<T> implements
}

protected abstract T convert(L listenerResponse);

}
@ -48,16 +48,21 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface. In this case we assume the node is
* running in production and all bootstrap checks must pass.
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code
* es.enforce.bootstrap.checks} is set to {@code true}. In this case we assume the node is running in production and all bootstrap checks must
* pass.
*/
final class BootstrapChecks {

private BootstrapChecks() {
}

static final String ES_ENFORCE_BOOTSTRAP_CHECKS = "es.enforce.bootstrap.checks";

/**
* Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface.
* Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface. If the system property
* {@code es.enforce.bootstrap.checks} is set to {@code true} then the bootstrap checks will be enforced regardless of whether or not
* the transport protocol is bound to a non-loopback interface.
*
* @param settings the current node settings
* @param boundTransportAddress the node network bindings
@ -74,7 +79,9 @@ final class BootstrapChecks {
}

/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings. If the system
* property {@code es.enforce.bootstrap.checks} is set to {@code true} then the bootstrap checks will be enforced regardless of whether
* or not the transport protocol is bound to a non-loopback interface.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
@ -88,7 +95,9 @@ final class BootstrapChecks {
}

/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings. If the system
* property {@code es.enforce.bootstrap.checks} is set to {@code true} then the bootstrap checks will be enforced regardless of whether
* or not the transport protocol is bound to a non-loopback interface.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
@ -101,13 +110,31 @@ final class BootstrapChecks {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();

final String esEnforceBootstrapChecks = System.getProperty(ES_ENFORCE_BOOTSTRAP_CHECKS);
final boolean enforceBootstrapChecks;
if (esEnforceBootstrapChecks == null) {
enforceBootstrapChecks = false;
} else if (Boolean.TRUE.toString().equals(esEnforceBootstrapChecks)) {
enforceBootstrapChecks = true;
} else {
final String message =
String.format(
Locale.ROOT,
"[%s] must be [true] but was [%s]",
ES_ENFORCE_BOOTSTRAP_CHECKS,
esEnforceBootstrapChecks);
throw new IllegalArgumentException(message);
}

if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
} else if (enforceBootstrapChecks) {
logger.info("explicitly enforcing bootstrap checks");
}

for (final BootstrapCheck check : checks) {
if (check.check()) {
if (!enforceLimits && !check.alwaysEnforce()) {
if (!(enforceLimits || enforceBootstrapChecks) && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());
@ -127,7 +154,6 @@ final class BootstrapChecks {
errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
throw ne;
}

}

static void log(final Logger logger, final String error) {
@ -140,9 +166,9 @@ final class BootstrapChecks {
* @param boundTransportAddress the node network bindings
* @return {@code true} if the checks should be enforced
*/
static boolean enforceLimits(BoundTransportAddress boundTransportAddress) {
Predicate<TransportAddress> isLoopbackOrLinkLocalAddress = t -> t.address().getAddress().isLinkLocalAddress()
|| t.address().getAddress().isLoopbackAddress();
static boolean enforceLimits(final BoundTransportAddress boundTransportAddress) {
Predicate<TransportAddress> isLoopbackOrLinkLocalAddress =
t -> t.address().getAddress().isLinkLocalAddress() || t.address().getAddress().isLoopbackAddress();
return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress()));
}
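The enforcement logic above accepts only the literal string "true" for es.enforce.bootstrap.checks, treats an absent property as "do not enforce", and rejects every other value. A runnable sketch of that strict parse, using a hypothetical property name purely for illustration:

import java.util.Locale;

// Sketch only: strict "true or absent" parsing of a system property,
// mirroring the es.enforce.bootstrap.checks handling in the diff above.
final class StrictFlag {
    static boolean parse(String propertyName) {
        final String value = System.getProperty(propertyName);
        if (value == null) {
            return false; // property not set: do not enforce
        } else if (Boolean.TRUE.toString().equals(value)) {
            return true;  // only the exact string "true" enables enforcement
        } else {
            // anything else (including "false") is treated as a configuration error
            throw new IllegalArgumentException(String.format(Locale.ROOT,
                    "[%s] must be [true] but was [%s]", propertyName, value));
        }
    }

    public static void main(String[] args) {
        System.setProperty("example.enforce.checks", "true");
        System.out.println(parse("example.enforce.checks")); // prints true
    }
}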
@ -31,6 +31,7 @@ import java.security.PermissionCollection;
import java.security.Permissions;
import java.security.Policy;
import java.security.ProtectionDomain;
import java.util.Collections;
import java.util.Map;
import java.util.function.Predicate;

@ -50,7 +51,7 @@ final class ESPolicy extends Policy {

ESPolicy(PermissionCollection dynamic, Map<String,Policy> plugins, boolean filterBadDefaults) {
this.template = Security.readPolicy(getClass().getResource(POLICY_RESOURCE), JarHell.parseClassPath());
this.untrusted = Security.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), new URL[0]);
this.untrusted = Security.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), Collections.emptySet());
if (filterBadDefaults) {
this.system = new SystemPolicy(Policy.getPolicy());
} else {
@ -36,9 +36,11 @@ import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
@ -93,7 +95,7 @@ public class JarHell {
* @return array of URLs
* @throws IllegalStateException if the classpath contains empty elements
*/
public static URL[] parseClassPath() {
public static Set<URL> parseClassPath() {
return parseClassPath(System.getProperty("java.class.path"));
}

@ -104,13 +106,12 @@ public class JarHell {
* @throws IllegalStateException if the classpath contains empty elements
*/
@SuppressForbidden(reason = "resolves against CWD because that is how classpaths work")
static URL[] parseClassPath(String classPath) {
static Set<URL> parseClassPath(String classPath) {
String pathSeparator = System.getProperty("path.separator");
String fileSeparator = System.getProperty("file.separator");
String elements[] = classPath.split(pathSeparator);
URL urlElements[] = new URL[elements.length];
for (int i = 0; i < elements.length; i++) {
String element = elements[i];
Set<URL> urlElements = new LinkedHashSet<>(); // order is already lost, but some filesystems have it
for (String element : elements) {
// Technically empty classpath element behaves like CWD.
// So below is the "correct" code, however in practice with ES, this is usually just a misconfiguration,
// from old shell scripts left behind or something:
@ -136,13 +137,17 @@ public class JarHell {
}
// now just parse as ordinary file
try {
urlElements[i] = PathUtils.get(element).toUri().toURL();
URL url = PathUtils.get(element).toUri().toURL();
if (urlElements.add(url) == false) {
throw new IllegalStateException("jar hell!" + System.lineSeparator() +
"duplicate jar on classpath: " + classPath);
}
} catch (MalformedURLException e) {
// should not happen, as we use the filesystem API
throw new RuntimeException(e);
}
}
return urlElements;
return Collections.unmodifiableSet(urlElements);
}

/**
@ -150,7 +155,7 @@ public class JarHell {
* @throws IllegalStateException if jar hell was found
*/
@SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
public static void checkJarHell(URL urls[]) throws URISyntaxException, IOException {
public static void checkJarHell(Set<URL> urls) throws URISyntaxException, IOException {
Logger logger = Loggers.getLogger(JarHell.class);
// we don't try to be sneaky and use deprecated/internal/not portable stuff
// like sun.boot.class.path, and with jigsaw we don't yet have a way to get
@ -168,8 +173,8 @@ public class JarHell {
}
if (path.toString().endsWith(".jar")) {
if (!seenJars.add(path)) {
logger.debug("excluding duplicate classpath element: {}", path);
continue;
throw new IllegalStateException("jar hell!" + System.lineSeparator() +
"duplicate jar on classpath: " + path);
}
logger.debug("examining jar: {}", path);
try (JarFile file = new JarFile(path.toString())) {
@ -198,8 +203,8 @@ public class JarHell {
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String entry = root.relativize(file).toString();
if (entry.endsWith(".class")) {
// normalize with the os separator
entry = entry.replace(sep, ".").substring(0, entry.length() - 6);
// normalize with the os separator, remove '.class'
entry = entry.replace(sep, ".").substring(0, entry.length() - ".class".length());
checkClass(clazzes, entry, path);
}
return super.visitFile(file, attrs);
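parseClassPath now returns an ordered Set and fails fast on duplicate entries instead of silently skipping them. A minimal sketch of that duplicate check, assuming a caller-supplied separator rather than the platform-specific path.separator property:

import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;

// Sketch only: split a classpath-like string, convert each element to a URL,
// and fail on duplicates, in the spirit of the JarHell change above.
final class ClassPathSketch {
    static Set<URL> parse(String classPath, String separator) {
        Set<URL> urls = new LinkedHashSet<>(); // keeps insertion order
        for (String element : classPath.split(separator)) {
            try {
                URL url = Paths.get(element).toUri().toURL();
                if (urls.add(url) == false) {
                    throw new IllegalStateException("duplicate classpath element: " + element);
                }
            } catch (MalformedURLException e) {
                throw new RuntimeException(e); // should not happen for filesystem paths
            }
        }
        return Collections.unmodifiableSet(urls);
    }

    public static void main(String[] args) {
        System.out.println(parse("lib/a.jar:lib/b.jar", ":"));
    }
}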
@ -48,8 +48,10 @@ import java.security.URIParameter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
* Initializes SecurityManager with necessary permissions.
@ -127,19 +129,23 @@ final class Security {
@SuppressForbidden(reason = "proper use of URL")
static Map<String,Policy> getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException {
Map<String,Policy> map = new HashMap<>();
// collect up lists of plugins and modules
List<Path> pluginsAndModules = new ArrayList<>();
// collect up set of plugins and modules by listing directories.
Set<Path> pluginsAndModules = new LinkedHashSet<>(); // order is already lost, but some filesystems have it
if (Files.exists(environment.pluginsFile())) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
for (Path plugin : stream) {
pluginsAndModules.add(plugin);
if (pluginsAndModules.add(plugin) == false) {
throw new IllegalStateException("duplicate plugin: " + plugin);
}
}
}
}
if (Files.exists(environment.modulesFile())) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.modulesFile())) {
for (Path plugin : stream) {
pluginsAndModules.add(plugin);
for (Path module : stream) {
if (pluginsAndModules.add(module) == false) {
throw new IllegalStateException("duplicate module: " + module);
}
}
}
}
@ -149,15 +155,18 @@ final class Security {
if (Files.exists(policyFile)) {
// first get a list of URLs for the plugins' jars:
// we resolve symlinks so map is keyed on the normalize codebase name
List<URL> codebases = new ArrayList<>();
Set<URL> codebases = new LinkedHashSet<>(); // order is already lost, but some filesystems have it
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
for (Path jar : jarStream) {
codebases.add(jar.toRealPath().toUri().toURL());
URL url = jar.toRealPath().toUri().toURL();
if (codebases.add(url) == false) {
throw new IllegalStateException("duplicate module/plugin: " + url);
}
}
}

// parse the plugin's policy file into a set of permissions
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()]));
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases);

// consult this policy for each of the plugin's jars:
for (URL url : codebases) {
@ -175,24 +184,33 @@ final class Security {
/**
* Reads and returns the specified {@code policyFile}.
* <p>
* Resources (e.g. jar files and directories) listed in {@code codebases} location
* will be provided to the policy file via a system property of the short name:
* e.g. <code>${codebase.joda-convert-1.2.jar}</code> would map to full URL.
* Jar files listed in {@code codebases} location will be provided to the policy file via
* a system property of the short name: e.g. <code>${codebase.joda-convert-1.2.jar}</code>
* would map to full URL.
*/
@SuppressForbidden(reason = "accesses fully qualified URLs to configure security")
static Policy readPolicy(URL policyFile, URL codebases[]) {
static Policy readPolicy(URL policyFile, Set<URL> codebases) {
try {
try {
// set codebase properties
for (URL url : codebases) {
String shortName = PathUtils.get(url.toURI()).getFileName().toString();
System.setProperty("codebase." + shortName, url.toString());
if (shortName.endsWith(".jar") == false) {
continue; // tests :(
}
String previous = System.setProperty("codebase." + shortName, url.toString());
if (previous != null) {
throw new IllegalStateException("codebase property already set: " + shortName + "->" + previous);
}
}
return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI()));
} finally {
// clear codebase properties
for (URL url : codebases) {
String shortName = PathUtils.get(url.toURI()).getFileName().toString();
if (shortName.endsWith(".jar") == false) {
continue; // tests :(
}
System.clearProperty(codebase." + shortName);
}
}
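readPolicy above publishes each codebase URL as a temporary codebase.<jar-name> system property, refuses to clobber a property that is already set, and always clears the properties again in a finally block. A compact sketch of that set-then-always-clear pattern, with hypothetical names (ScopedProperties, withProperties) and a single shared value for simplicity:

import java.util.Set;
import java.util.function.Supplier;

// Sketch only: expose temporary system properties while a parser runs,
// and always clear them afterwards, mirroring the readPolicy change above.
final class ScopedProperties {
    static <T> T withProperties(Set<String> names, String value, Supplier<T> action) {
        try {
            for (String name : names) {
                String previous = System.setProperty(name, value);
                if (previous != null) {
                    throw new IllegalStateException("property already set: " + name + "->" + previous);
                }
            }
            return action.get();
        } finally {
            for (String name : names) {
                System.clearProperty(name); // clearing an unset property is harmless
            }
        }
    }
}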
@ -66,24 +66,24 @@ import java.util.Set;
/**
* Represents the current state of the cluster.
* <p>
* The cluster state object is immutable with an exception of the {@link RoutingNodes} structure, which is
* The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is
* built on demand from the {@link RoutingTable}.
* The cluster state can be updated only on the master node. All updates are performed on a
* single thread and controlled by the {@link ClusterService}. After every update the
* {@link Discovery#publish} method publishes new version of the cluster state to all other nodes in the
* {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the
* cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on
* the type of discovery. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
* publishing mechanism can be overridden by other discovery.
* <p>
* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state
* differences instead of the entire state on each change. The publishing mechanism should only send differences
* to a node if this node was present in the previous version of the cluster state. If a node is not present was
* not present in the previous version of the cluster state, such node is unlikely to have the previous cluster
* state version and should be sent a complete version. In order to make sure that the differences are applied to
* to a node if this node was present in the previous version of the cluster state. If a node was
* not present in the previous version of the cluster state, this node is unlikely to have the previous cluster
* state version and should be sent a complete version. In order to make sure that the differences are applied to the
* correct version of the cluster state, each cluster state version update generates {@link #stateUUID} that uniquely
* identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to
* makes sure that the correct diffs are applied. If uuids don’t match, the {@link ClusterStateDiff#apply} method
* throws the {@link IncompatibleClusterStateVersionException}, which should cause the publishing mechanism to send
* make sure that the correct diffs are applied. If uuids don’t match, the {@link ClusterStateDiff#apply} method
* throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send
* a full version of the cluster state to the node on which this exception was thrown.
*/
public class ClusterState implements ToXContent, Diffable<ClusterState> {
@ -252,8 +252,8 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}

/**
* a cluster state supersedes another state iff they are from the same master and the version this state is higher than the other
* state.
* a cluster state supersedes another state if they are from the same master and the version of this state is higher than that of the
* other state.
* <p>
* In essence that means that all the changes from the other cluster state are also reflected by the current one
*/
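The javadoc above describes the diff-based publishing protocol: a diff is only valid against the exact state version it was built from, and the stateUUID check is what catches mismatches so a full state can be sent instead. The following sketch illustrates just that uuid check with entirely hypothetical types; it is not the Elasticsearch Diffable API.

// Sketch only, with hypothetical types: a diff records the uuid of the state it was
// computed against; applying it to any other state forces a full-state resend.
final class StateDiffSketch {
    static final class State {
        final long version;
        final String uuid;
        State(long version, String uuid) { this.version = version; this.uuid = uuid; }
    }

    static final class Diff {
        final String fromUuid; // uuid of the state this diff was built from
        final State result;
        Diff(String fromUuid, State result) { this.fromUuid = fromUuid; this.result = result; }

        State apply(State current) {
            if (current.uuid.equals(fromUuid) == false) {
                // in Elasticsearch this role is played by IncompatibleClusterStateVersionException,
                // which causes the publisher to send the complete cluster state to that node
                throw new IllegalStateException("diff built from uuid [" + fromUuid
                        + "] cannot be applied to state with uuid [" + current.uuid + "]");
            }
            return result;
        }
    }
}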
@ -89,6 +89,18 @@ public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements
return entries.hashCode();
}

@Override
public String toString() {
StringBuilder builder = new StringBuilder("RestoreInProgress[");
for (int i = 0; i < entries.size(); i++) {
builder.append(entries.get(i).snapshot().getSnapshotId().getName());
if (i + 1 < entries.size()) {
builder.append(",");
}
}
return builder.append("]").toString();
}

/**
* Restore metadata
*/
@ -150,6 +150,18 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable<Custom> i
return builder;
}

@Override
public String toString() {
StringBuilder builder = new StringBuilder("SnapshotDeletionsInProgress[");
for (int i = 0; i < entries.size(); i++) {
builder.append(entries.get(i).getSnapshot().getSnapshotId().getName());
if (i + 1 < entries.size()) {
builder.append(",");
}
}
return builder.append("]").toString();
}

/**
* A class representing a snapshot deletion request entry in the cluster state.
*/
@ -70,6 +70,18 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
return entries.hashCode();
}

@Override
public String toString() {
StringBuilder builder = new StringBuilder("SnapshotsInProgress[");
for (int i = 0; i < entries.size(); i++) {
builder.append(entries.get(i).snapshot().getSnapshotId().getName());
if (i + 1 < entries.size()) {
builder.append(",");
}
}
return builder.append("]").toString();
}

public static class Entry {
private final State state;
private final Snapshot snapshot;
@ -104,6 +104,13 @@ public abstract class SecureSetting<T> extends Setting<T> {

// TODO: override toXContent

/**
* Overrides the diff operation to make this a no-op for secure settings as they shouldn't be returned in a diff
*/
@Override
public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) {
}

/**
* A setting which contains a sensitive string.
*
@ -106,8 +106,39 @@ public final class SecureString implements CharSequence, Closeable {
*/
@Override
public synchronized void close() {
Arrays.fill(chars, '\0');
chars = null;
if (chars != null) {
Arrays.fill(chars, '\0');
chars = null;
}
}

/**
* Returns a new copy of this object that is backed by its own char array. Closing the new instance has no effect on the instance it
* was created from. This is useful for APIs which accept a char array and you want to be safe about the API potentially modifying the
* char array. For example:
*
* <pre>
*     try (SecureString copy = secureString.clone()) {
*         // pass the char[] to an external API
*         PasswordAuthentication auth = new PasswordAuthentication(username, copy.getChars());
*         ...
*     }
* </pre>
*/
@Override
public synchronized SecureString clone() {
ensureNotClosed();
return new SecureString(Arrays.copyOf(chars, chars.length));
}

/**
* Returns the underlying char[]. This is a dangerous operation as the array may be modified while it is being used by other threads
* or a consumer may modify the values in the array. For safety, it is preferable to use {@link #clone()} and pass its chars to the
* consumer when the chars are needed multiple times.
*/
public synchronized char[] getChars() {
ensureNotClosed();
return chars;
}

/** Throw an exception if this string has been closed, indicating something is trying to access the data after being closed. */
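The close() change above also makes closing idempotent: the char array is zeroed only while it is still present, so a second close is a no-op rather than a NullPointerException. A tiny sketch of that pattern with a hypothetical class name:

import java.util.Arrays;

// Sketch only: idempotent close that wipes sensitive data exactly once.
final class SecretChars implements AutoCloseable {
    private char[] chars;

    SecretChars(char[] chars) { this.chars = chars; }

    @Override
    public synchronized void close() {
        if (chars != null) {          // second and later calls are no-ops
            Arrays.fill(chars, '\0'); // wipe the sensitive content
            chars = null;
        }
    }
}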
@ -65,6 +65,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
@ -214,6 +215,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
final List<Future<TransportAddress[]>> futures =
executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
final Set<TransportAddress> localAddresses = new HashSet<>();
localAddresses.add(transportService.boundAddress().publishAddress());
localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
// ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the
// hostname with the corresponding task by iterating together
final Iterator<String> it = hosts.iterator();
@ -225,13 +229,17 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
final TransportAddress[] addresses = future.get();
logger.trace("resolved host [{}] to {}", hostname, addresses);
for (int addressId = 0; addressId < addresses.length; addressId++) {
discoveryNodes.add(
new DiscoveryNode(
nodeId_prefix + hostname + "_" + addressId + "#",
addresses[addressId],
emptyMap(),
emptySet(),
Version.CURRENT.minimumCompatibilityVersion()));
final TransportAddress address = addresses[addressId];
// no point in pinging ourselves
if (localAddresses.contains(address) == false) {
discoveryNodes.add(
new DiscoveryNode(
nodeId_prefix + hostname + "_" + addressId + "#",
address,
emptyMap(),
emptySet(),
Version.CURRENT.minimumCompatibilityVersion()));
}
}
} catch (final ExecutionException e) {
assert e.getCause() != null;
@ -257,11 +257,6 @@ public class DocumentMapper implements ToXContent {
return this.objectMappers;
}

// TODO this method looks like it is only used in tests...
public ParsedDocument parse(String index, String type, String id, BytesReference source) throws MapperParsingException {
return parse(SourceToParse.source(index, type, id, source, XContentType.JSON));
}

public ParsedDocument parse(SourceToParse source) throws MapperParsingException {
return documentParser.parseDocument(source);
}

@ -110,7 +110,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
private volatile Map<String, DocumentMapper> mappers = emptyMap();

private volatile FieldTypeLookup fieldTypes;
private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
private volatile Map<String, ObjectMapper> fullPathObjectMappers = emptyMap();
private boolean hasNested = false; // updated dynamically to true when a nested object is added
private boolean allEnabled = false; // updated dynamically to true when _all is enabled

@ -394,6 +394,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

for (ObjectMapper objectMapper : objectMappers) {
if (fullPathObjectMappers == this.fullPathObjectMappers) {
// first time through the loops
fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
}
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
@ -414,6 +415,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

if (oldMapper == null && newMapper.parentFieldMapper().active()) {
if (parentTypes == this.parentTypes) {
// first time through the loop
parentTypes = new HashSet<>(this.parentTypes);
}
parentTypes.add(mapper.parentFieldMapper().type());
@ -456,8 +458,15 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
// make structures immutable
mappers = Collections.unmodifiableMap(mappers);
results = Collections.unmodifiableMap(results);
parentTypes = Collections.unmodifiableSet(parentTypes);
fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);

// only need to immutably rewrap these if the previous reference was changed.
// if not then they are already implicitly immutable.
if (fullPathObjectMappers != this.fullPathObjectMappers) {
fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
}
if (parentTypes != this.parentTypes) {
parentTypes = Collections.unmodifiableSet(parentTypes);
}

// commit the change
if (defaultMappingSource != null) {
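The MapperService hunks above apply a copy-on-write discipline: the published map is copied only when the first change actually happens, and it is re-wrapped as unmodifiable only if the reference changed. A minimal sketch of that pattern, with hypothetical field and class names:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Sketch only: copy an immutable map lazily, on the first mutation, and
// publish an unmodifiable view only if it was actually copied.
final class CopyOnWriteUpdate {
    private volatile Map<String, String> current = Collections.emptyMap();

    synchronized void putAll(Map<String, String> updates) {
        Map<String, String> updated = current;
        for (Map.Entry<String, String> entry : updates.entrySet()) {
            if (updated == current) {
                updated = new HashMap<>(current); // first change: take a private copy
            }
            updated.put(entry.getKey(), entry.getValue());
        }
        if (updated != current) {
            current = Collections.unmodifiableMap(updated); // publish only if changed
        }
    }
}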
@ -265,11 +265,16 @@ public class ScaledFloatFieldMapper extends FieldMapper {
if (stats == null) {
return null;
}
return new FieldStats.Double(stats.getMaxDoc(), stats.getDocCount(),
if (stats.hasMinMax()) {
return new FieldStats.Double(stats.getMaxDoc(), stats.getDocCount(),
stats.getSumDocFreq(), stats.getSumTotalTermFreq(),
stats.isSearchable(), stats.isAggregatable(),
stats.getMinValue() == null ? null : stats.getMinValue() / scalingFactor,
stats.getMaxValue() == null ? null : stats.getMaxValue() / scalingFactor);
stats.getMinValue() / scalingFactor,
stats.getMaxValue() / scalingFactor);
}
return new FieldStats.Double(stats.getMaxDoc(), stats.getDocCount(),
stats.getSumDocFreq(), stats.getSumTotalTermFreq(),
stats.isSearchable(), stats.isAggregatable());
}

@Override
@ -263,7 +263,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}

/**
* Renames all the given files from the key of the map to the
* value of the map. All successfully renamed files are removed from the map in-place.
@ -389,7 +388,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}

/**
* Reads a MetadataSnapshot from the given index locations or returns an empty snapshot if it can't be read.
*
@ -597,7 +595,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
/**
* This method deletes every file in this store that is not contained in the given source meta data or is a
* legacy checksum file. After the delete it pulls the latest metadata snapshot from the store and compares it
* to the given snapshot. If the snapshots are inconsistent an illegal state exception is thrown
* to the given snapshot. If the snapshots are inconsistent an illegal state exception is thrown.
*
* @param reason the reason for this cleanup operation logged for each deleted file
* @param sourceMetaData the metadata used for cleanup. all files in this metadata should be kept around.
@ -641,9 +639,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
for (StoreFileMetaData meta : recoveryDiff.different) {
StoreFileMetaData local = targetMetaData.get(meta.name());
StoreFileMetaData remote = sourceMetaData.get(meta.name());
// if we have different files the they must have no checksums otherwise something went wrong during recovery.
// we have that problem when we have an empty index is only a segments_1 file then we can't tell if it's a Lucene 4.8 file
// and therefore no checksum. That isn't much of a problem since we simply copy it over anyway but those files come out as
// if we have different files then they must have no checksums; otherwise something went wrong during recovery.
// we have that problem when we have an empty index that is only a segments_1 file so we can't tell if it's a Lucene 4.8 file
// and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files come out as
// different in the diff. That's why we have to double check here again if the rest of it matches.

// all is fine this file is just part of a commit or a segment that is different
@ -676,7 +674,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
this.deletesLogger = deletesLogger;
}

@Override
public void close() throws IOException {
assert false : "Nobody should close this directory except of the Store itself";
@ -138,8 +138,8 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {

public void add(Path path) {
total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total));
free = addLong(free, path.free);
available = addLong(available, path.available);
free = FsProbe.adjustForHugeFilesystems(addLong(free, path.free));
available = FsProbe.adjustForHugeFilesystems(addLong(available, path.available));
if (path.spins != null && path.spins.booleanValue()) {
// Spinning is contagious!
spins = Boolean.TRUE;
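The fix above simply runs free and available through the same adjustment already applied to total. The exact behavior of FsProbe.adjustForHugeFilesystems is not shown in this diff; assuming it clamps values that overflowed to a negative number on very large filesystems, a sketch of the shape of the change:

// Sketch only: assumed behavior of the huge-filesystem adjustment, applied
// uniformly to total, free and available as in the diff above.
final class FsTotals {
    long total, free, available;

    private static long adjustForHugeFilesystems(long bytes) {
        // assumption: huge filesystems can overflow the reported size to a negative value
        return bytes < 0 ? Long.MAX_VALUE : bytes;
    }

    private static long addLong(long current, long other) {
        // assumption: -1 means "unknown" and is ignored in the sum
        if (other == -1) {
            return current;
        }
        return current == -1 ? other : current + other;
    }

    void add(long pathTotal, long pathFree, long pathAvailable) {
        total = adjustForHugeFilesystems(addLong(total, pathTotal));
        free = adjustForHugeFilesystems(addLong(free, pathFree));
        available = adjustForHugeFilesystems(addLong(available, pathAvailable));
    }
}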
@ -105,6 +105,13 @@ class InstallPluginCommand extends EnvironmentAwareCommand {

private static final String PROPERTY_STAGING_ID = "es.plugins.staging";

// exit codes for install
/** A plugin with the same name is already installed. */
static final int PLUGIN_EXISTS = 1;
/** The plugin zip is not properly structured. */
static final int PLUGIN_MALFORMED = 2;

/** The builtin modules, which are plugins, but cannot be installed or removed. */
static final Set<String> MODULES;
static {
@ -333,7 +340,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
byte[] zipbytes = Files.readAllBytes(zip);
String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes));
if (expectedChecksum.equals(gotChecksum) == false) {
throw new UserException(ExitCodes.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum);
throw new UserException(ExitCodes.IO_ERROR,
"SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum);
}

return zip;
@ -357,12 +365,14 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
hasEsDir = true;
Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length()));

// Using the entry name as a path can result in an entry outside of the plugin dir, either if the
// name starts with the root of the filesystem, or it is a relative entry like ../whatever.
// This check attempts to identify both cases by first normalizing the path (which removes foo/..)
// and ensuring the normalized entry is still rooted with the target plugin directory.
// Using the entry name as a path can result in an entry outside of the plugin dir,
// either if the name starts with the root of the filesystem, or it is a relative
// entry like ../whatever. This check attempts to identify both cases by first
// normalizing the path (which removes foo/..) and ensuring the normalized entry
// is still rooted with the target plugin directory.
if (targetFile.normalize().startsWith(target) == false) {
throw new IOException("Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory");
throw new UserException(PLUGIN_MALFORMED, "Zip contains entry name '" +
entry.getName() + "' resolving outside of plugin directory");
}

// be on the safe side: do not rely on that directories are always extracted
@ -384,7 +394,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
Files.delete(zip);
if (hasEsDir == false) {
IOUtils.rm(target);
throw new UserException(ExitCodes.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip");
throw new UserException(PLUGIN_MALFORMED,
"`elasticsearch` directory is missing in the plugin zip");
}
return target;
}
@ -424,10 +435,11 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
if (Files.exists(destination)) {
final String message = String.format(
Locale.ROOT,
"plugin directory [%s] already exists; if you need to update the plugin, uninstall it first using command 'remove %s'",
"plugin directory [%s] already exists; if you need to update the plugin, " +
"uninstall it first using command 'remove %s'",
destination.toAbsolutePath(),
info.getName());
throw new UserException(ExitCodes.CONFIG, message);
throw new UserException(PLUGIN_EXISTS, message);
}

terminal.println(VERBOSE, info.toString());
@ -435,8 +447,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
// don't let user install plugin as a module...
// they might be unavoidably in maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new UserException(
ExitCodes.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
throw new UserException(ExitCodes.USAGE, "plugin '" + info.getName() +
"' cannot be installed like this, it is a system module");
}

// check for jar hell before any copying
@ -455,8 +467,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
/** check a candidate plugin for jar hell before installing it */
void jarHellCheck(Path candidate, Path pluginsDir) throws Exception {
// create list of current jars in classpath
final List<URL> jars = new ArrayList<>();
jars.addAll(Arrays.asList(JarHell.parseClassPath()));
final Set<URL> jars = new HashSet<>(JarHell.parseClassPath());

// read existing bundles. this does some checks on the installation too.
PluginsService.getPluginBundles(pluginsDir);
@ -464,13 +475,15 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
// add plugin jars to the list
Path pluginJars[] = FileSystemUtils.files(candidate, "*.jar");
for (Path jar : pluginJars) {
jars.add(jar.toUri().toURL());
if (jars.add(jar.toUri().toURL()) == false) {
throw new IllegalStateException("jar hell! duplicate plugin jar: " + jar);
}
}
// TODO: no jars should be an error
// TODO: verify the classname exists in one of the jars!

// check combined (current classpath + new jars to-be-added)
JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
JarHell.checkJarHell(jars);
}

/**
@ -533,7 +546,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
/** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. */
private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception {
if (Files.isDirectory(tmpBinDir) == false) {
throw new UserException(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory");
throw new UserException(PLUGIN_MALFORMED, "bin in plugin " + info.getName() + " is not a directory");
}
Files.createDirectory(destBinDir);
setFileAttributes(destBinDir, BIN_DIR_PERMS);
@ -541,9 +554,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpBinDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new UserException(
ExitCodes.DATA_ERROR,
"Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName());
throw new UserException(PLUGIN_MALFORMED, "Directories not allowed in bin dir " +
"for plugin " + info.getName() + ", found " + srcFile.getFileName());
}

Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile));
@ -560,7 +572,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
*/
private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception {
if (Files.isDirectory(tmpConfigDir) == false) {
throw new UserException(ExitCodes.IO_ERROR, "config in plugin " + info.getName() + " is not a directory");
throw new UserException(PLUGIN_MALFORMED,
"config in plugin " + info.getName() + " is not a directory");
}

Files.createDirectories(destConfigDir);
@ -576,7 +589,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpConfigDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new UserException(ExitCodes.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName());
throw new UserException(PLUGIN_MALFORMED,
"Directories not allowed in config dir for plugin " + info.getName());
|
||||
}
|
||||
|
||||
Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile));
|
||||
|
@ -56,9 +56,17 @@ class ListPluginsCommand extends EnvironmentAwareCommand {
|
||||
}
|
||||
Collections.sort(plugins);
|
||||
for (final Path plugin : plugins) {
|
||||
terminal.println(plugin.getFileName().toString());
|
||||
PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
|
||||
terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
|
||||
terminal.println(Terminal.Verbosity.SILENT, plugin.getFileName().toString());
|
||||
try {
|
||||
PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
|
||||
terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
|
||||
} catch (IllegalArgumentException e) {
|
||||
if (e.getMessage().contains("incompatible with Elasticsearch")) {
|
||||
terminal.println("WARNING: " + e.getMessage());
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -58,8 +58,10 @@ import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
@ -107,16 +109,16 @@ public class PluginsService extends AbstractComponent {
|
||||
pluginsList.add(pluginInfo);
|
||||
}
|
||||
|
||||
Set<Bundle> seenBundles = new LinkedHashSet<>();
|
||||
List<PluginInfo> modulesList = new ArrayList<>();
|
||||
// load modules
|
||||
if (modulesDirectory != null) {
|
||||
try {
|
||||
List<Bundle> bundles = getModuleBundles(modulesDirectory);
|
||||
List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(bundles);
|
||||
pluginsLoaded.addAll(loaded);
|
||||
for (Tuple<PluginInfo, Plugin> module : loaded) {
|
||||
modulesList.add(module.v1());
|
||||
Set<Bundle> modules = getModuleBundles(modulesDirectory);
|
||||
for (Bundle bundle : modules) {
|
||||
modulesList.add(bundle.plugin);
|
||||
}
|
||||
seenBundles.addAll(modules);
|
||||
} catch (IOException ex) {
|
||||
throw new IllegalStateException("Unable to initialize modules", ex);
|
||||
}
|
||||
@ -125,17 +127,19 @@ public class PluginsService extends AbstractComponent {
|
||||
// now, find all the ones that are in plugins/
|
||||
if (pluginsDirectory != null) {
|
||||
try {
|
||||
List<Bundle> bundles = getPluginBundles(pluginsDirectory);
|
||||
List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(bundles);
|
||||
pluginsLoaded.addAll(loaded);
|
||||
for (Tuple<PluginInfo, Plugin> plugin : loaded) {
|
||||
pluginsList.add(plugin.v1());
|
||||
Set<Bundle> plugins = getPluginBundles(pluginsDirectory);
|
||||
for (Bundle bundle : plugins) {
|
||||
pluginsList.add(bundle.plugin);
|
||||
}
|
||||
seenBundles.addAll(plugins);
|
||||
} catch (IOException ex) {
|
||||
throw new IllegalStateException("Unable to initialize plugins", ex);
|
||||
}
|
||||
}
|
||||
|
||||
List<Tuple<PluginInfo, Plugin>> loaded = loadBundles(seenBundles);
|
||||
pluginsLoaded.addAll(loaded);
|
||||
|
||||
this.info = new PluginsAndModules(pluginsList, modulesList);
|
||||
this.plugins = Collections.unmodifiableList(pluginsLoaded);
|
||||
|
||||
@ -234,48 +238,70 @@ public class PluginsService extends AbstractComponent {
|
||||
// a "bundle" is a group of plugins in a single classloader
|
||||
// really should be 1-1, but we are not so fortunate
|
||||
static class Bundle {
|
||||
List<PluginInfo> plugins = new ArrayList<>();
|
||||
List<URL> urls = new ArrayList<>();
|
||||
final PluginInfo plugin;
|
||||
final Set<URL> urls;
|
||||
|
||||
Bundle(PluginInfo plugin, Set<URL> urls) {
|
||||
this.plugin = Objects.requireNonNull(plugin);
|
||||
this.urls = Objects.requireNonNull(urls);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
Bundle bundle = (Bundle) o;
|
||||
return Objects.equals(plugin, bundle.plugin);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(plugin);
|
||||
}
|
||||
}
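Because Bundle now keys equals and hashCode on its PluginInfo alone, inserting bundles into a LinkedHashSet is what detects a plugin that appears twice. The idiom, illustrated with a stand-in class (FakeBundle is hypothetical; only the identity-on-name behaviour mirrors the real Bundle):

```java
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;

public class BundleDedupSketch {
    // Stand-in for Bundle: identity is the plugin name only, never the jar URLs.
    static final class FakeBundle {
        final String pluginName;
        FakeBundle(String pluginName) { this.pluginName = Objects.requireNonNull(pluginName); }
        @Override public boolean equals(Object o) {
            return o instanceof FakeBundle && pluginName.equals(((FakeBundle) o).pluginName);
        }
        @Override public int hashCode() { return Objects.hash(pluginName); }
    }

    public static void main(String[] args) {
        Set<FakeBundle> bundles = new LinkedHashSet<>();
        bundles.add(new FakeBundle("analysis-icu"));
        // Set.add returns false for a duplicate, which PluginsService turns into
        // an IllegalStateException("duplicate module: ..." / "duplicate plugin: ...").
        boolean added = bundles.add(new FakeBundle("analysis-icu"));
        System.out.println("second add accepted? " + added); // false
    }
}
```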
|
||||
|
||||
// similar in impl to getPluginBundles, but DO NOT try to make them share code.
|
||||
// we don't need to inherit all the leniency, and things are different enough.
|
||||
static List<Bundle> getModuleBundles(Path modulesDirectory) throws IOException {
|
||||
static Set<Bundle> getModuleBundles(Path modulesDirectory) throws IOException {
|
||||
// damn leniency
|
||||
if (Files.notExists(modulesDirectory)) {
|
||||
return Collections.emptyList();
|
||||
return Collections.emptySet();
|
||||
}
|
||||
List<Bundle> bundles = new ArrayList<>();
|
||||
Set<Bundle> bundles = new LinkedHashSet<>();
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(modulesDirectory)) {
|
||||
for (Path module : stream) {
|
||||
if (FileSystemUtils.isHidden(module)) {
|
||||
continue; // skip over .DS_Store etc
|
||||
}
|
||||
PluginInfo info = PluginInfo.readFromProperties(module);
|
||||
Bundle bundle = new Bundle();
|
||||
bundle.plugins.add(info);
|
||||
Set<URL> urls = new LinkedHashSet<>();
|
||||
// gather urls for jar files
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(module, "*.jar")) {
|
||||
for (Path jar : jarStream) {
|
||||
// normalize with toRealPath to get symlinks out of our hair
|
||||
bundle.urls.add(jar.toRealPath().toUri().toURL());
|
||||
URL url = jar.toRealPath().toUri().toURL();
|
||||
if (urls.add(url) == false) {
|
||||
throw new IllegalStateException("duplicate codebase: " + url);
|
||||
}
|
||||
}
|
||||
}
|
||||
bundles.add(bundle);
|
||||
if (bundles.add(new Bundle(info, urls)) == false) {
|
||||
throw new IllegalStateException("duplicate module: " + info);
|
||||
}
|
||||
}
|
||||
}
|
||||
return bundles;
|
||||
}
|
||||
|
||||
static List<Bundle> getPluginBundles(Path pluginsDirectory) throws IOException {
|
||||
static Set<Bundle> getPluginBundles(Path pluginsDirectory) throws IOException {
|
||||
Logger logger = Loggers.getLogger(PluginsService.class);
|
||||
|
||||
// TODO: remove this leniency, but tests bogusly rely on it
|
||||
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
|
||||
return Collections.emptyList();
|
||||
return Collections.emptySet();
|
||||
}
|
||||
|
||||
List<Bundle> bundles = new ArrayList<>();
|
||||
Set<Bundle> bundles = new LinkedHashSet<>();
|
||||
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
|
||||
for (Path plugin : stream) {
|
||||
@ -292,47 +318,58 @@ public class PluginsService extends AbstractComponent {
|
||||
+ plugin.getFileName() + "]. Was the plugin built before 2.0?", e);
|
||||
}
|
||||
|
||||
List<URL> urls = new ArrayList<>();
|
||||
Set<URL> urls = new LinkedHashSet<>();
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
|
||||
for (Path jar : jarStream) {
|
||||
// normalize with toRealPath to get symlinks out of our hair
|
||||
urls.add(jar.toRealPath().toUri().toURL());
|
||||
URL url = jar.toRealPath().toUri().toURL();
|
||||
if (urls.add(url) == false) {
|
||||
throw new IllegalStateException("duplicate codebase: " + url);
|
||||
}
|
||||
}
|
||||
}
|
||||
final Bundle bundle = new Bundle();
|
||||
bundles.add(bundle);
|
||||
bundle.plugins.add(info);
|
||||
bundle.urls.addAll(urls);
|
||||
if (bundles.add(new Bundle(info, urls)) == false) {
|
||||
throw new IllegalStateException("duplicate plugin: " + info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bundles;
|
||||
}
|
||||
|
||||
private List<Tuple<PluginInfo,Plugin>> loadBundles(List<Bundle> bundles) {
|
||||
private List<Tuple<PluginInfo,Plugin>> loadBundles(Set<Bundle> bundles) {
|
||||
List<Tuple<PluginInfo, Plugin>> plugins = new ArrayList<>();
|
||||
|
||||
for (Bundle bundle : bundles) {
|
||||
// jar-hell check the bundle against the parent classloader
|
||||
// pluginmanager does it, but we do it again, in case lusers mess with jar files manually
|
||||
try {
|
||||
final List<URL> jars = new ArrayList<>();
|
||||
jars.addAll(Arrays.asList(JarHell.parseClassPath()));
|
||||
jars.addAll(bundle.urls);
|
||||
JarHell.checkJarHell(jars.toArray(new URL[0]));
|
||||
Set<URL> classpath = JarHell.parseClassPath();
// check we don't have conflicting codebases
Set<URL> intersection = new HashSet<>(classpath);
intersection.retainAll(bundle.urls);
if (intersection.isEmpty() == false) {
throw new IllegalStateException("jar hell! duplicate codebases between" +
" plugin and core: " + intersection);
}
// check we don't have conflicting classes
Set<URL> union = new HashSet<>(classpath);
union.addAll(bundle.urls);
JarHell.checkJarHell(union);
} catch (Exception e) {
throw new IllegalStateException("failed to load bundle " + bundle.urls + " due to jar hell", e);
throw new IllegalStateException("failed to load plugin " + bundle.plugin +
" due to jar hell", e);
}
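The two inline comments above ("conflicting codebases" and "conflicting classes") describe a two-stage check: a set intersection catches a jar that is already on the node's classpath, and the JarHell pass over the union catches distinct jars that ship the same classes. A rough sketch of the purely set-based first stage (the URLs are invented and JarHell itself is left out, since it is an Elasticsearch-internal utility):

```java
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

public class CodebaseConflictSketch {
    public static void main(String[] args) throws MalformedURLException {
        // Pretend classpath of the running node and codebases contributed by a plugin bundle.
        Set<URL> classpath = new LinkedHashSet<>(Arrays.asList(
                new URL("file:///es/lib/lucene-core.jar"),
                new URL("file:///es/lib/elasticsearch.jar")));
        Set<URL> bundleUrls = new LinkedHashSet<>(Arrays.asList(
                new URL("file:///es/plugins/example/example.jar"),
                new URL("file:///es/lib/lucene-core.jar"))); // deliberate conflict

        // Stage 1: a non-empty intersection means the plugin repackages a core codebase.
        Set<URL> intersection = new HashSet<>(classpath);
        intersection.retainAll(bundleUrls);
        if (intersection.isEmpty() == false) {
            System.out.println("jar hell! duplicate codebases between plugin and core: " + intersection);
        }

        // Stage 2 (not shown): JarHell.checkJarHell(union) would scan the combined set
        // for individual classes that appear in more than one jar.
        Set<URL> union = new HashSet<>(classpath);
        union.addAll(bundleUrls);
        System.out.println("combined codebases to class-check: " + union.size());
    }
}
```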
|
||||
|
||||
// create a child to load the plugins in this bundle
|
||||
ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]), getClass().getClassLoader());
|
||||
for (PluginInfo pluginInfo : bundle.plugins) {
|
||||
// reload lucene SPI with any new services from the plugin
|
||||
reloadLuceneSPI(loader);
|
||||
final Class<? extends Plugin> pluginClass = loadPluginClass(pluginInfo.getClassname(), loader);
|
||||
final Plugin plugin = loadPlugin(pluginClass, settings);
|
||||
plugins.add(new Tuple<>(pluginInfo, plugin));
|
||||
}
|
||||
// create a child to load the plugin in this bundle
|
||||
ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]),
|
||||
getClass().getClassLoader());
|
||||
// reload lucene SPI with any new services from the plugin
|
||||
reloadLuceneSPI(loader);
|
||||
final Class<? extends Plugin> pluginClass =
|
||||
loadPluginClass(bundle.plugin.getClassname(), loader);
|
||||
final Plugin plugin = loadPlugin(pluginClass, settings);
|
||||
plugins.add(new Tuple<>(bundle.plugin, plugin));
|
||||
}
|
||||
|
||||
return Collections.unmodifiableList(plugins);
|
||||
|
@ -19,12 +19,14 @@
|
||||
|
||||
package org.elasticsearch.plugins;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.AtomicMoveNotSupportedException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
@ -39,36 +41,51 @@ import org.elasticsearch.env.Environment;
|
||||
import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE;
|
||||
|
||||
/**
|
||||
* A command for the plugin cli to remove a plugin from elasticsearch.
|
||||
* A command for the plugin CLI to remove a plugin from Elasticsearch.
|
||||
*/
|
||||
class RemovePluginCommand extends EnvironmentAwareCommand {
|
||||
|
||||
private final OptionSpec<String> arguments;
|
||||
|
||||
RemovePluginCommand() {
|
||||
super("Removes a plugin from elasticsearch");
|
||||
super("removes a plugin from Elasticsearch");
|
||||
this.arguments = parser.nonOptions("plugin name");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
|
||||
String arg = arguments.value(options);
|
||||
execute(terminal, arg, env);
|
||||
protected void execute(final Terminal terminal, final OptionSet options, final Environment env)
|
||||
throws Exception {
|
||||
final String pluginName = arguments.value(options);
|
||||
execute(terminal, pluginName, env);
|
||||
}
|
||||
|
||||
// pkg private for testing
|
||||
void execute(Terminal terminal, String pluginName, Environment env) throws Exception {
|
||||
/**
|
||||
* Remove the plugin specified by {@code pluginName}.
|
||||
*
|
||||
* @param terminal the terminal to use for input/output
|
||||
* @param pluginName the name of the plugin to remove
|
||||
* @param env the environment for the local node
|
||||
* @throws IOException if any I/O exception occurs while performing a file operation
|
||||
* @throws UserException if plugin name is null
|
||||
* @throws UserException if plugin directory does not exist
|
||||
* @throws UserException if the plugin bin directory is not a directory
|
||||
*/
|
||||
void execute(final Terminal terminal, final String pluginName, final Environment env)
|
||||
throws IOException, UserException {
|
||||
if (pluginName == null) {
|
||||
throw new UserException(ExitCodes.USAGE, "plugin name is required");
|
||||
}
|
||||
|
||||
terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "...");
|
||||
terminal.println("-> removing [" + Strings.coalesceToEmpty(pluginName) + "]...");
|
||||
|
||||
final Path pluginDir = env.pluginsFile().resolve(pluginName);
|
||||
if (Files.exists(pluginDir) == false) {
|
||||
throw new UserException(
|
||||
ExitCodes.CONFIG,
|
||||
"plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins");
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"plugin [%s] not found; "
|
||||
+ "run 'elasticsearch-plugin list' to get list of installed plugins",
|
||||
pluginName);
|
||||
throw new UserException(ExitCodes.CONFIG, message);
|
||||
}
|
||||
|
||||
final List<Path> pluginPaths = new ArrayList<>();
|
||||
@ -76,30 +93,41 @@ class RemovePluginCommand extends EnvironmentAwareCommand {
|
||||
final Path pluginBinDir = env.binFile().resolve(pluginName);
|
||||
if (Files.exists(pluginBinDir)) {
|
||||
if (Files.isDirectory(pluginBinDir) == false) {
|
||||
throw new UserException(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory");
|
||||
throw new UserException(
|
||||
ExitCodes.IO_ERROR, "bin dir for " + pluginName + " is not a directory");
|
||||
}
|
||||
pluginPaths.add(pluginBinDir);
|
||||
terminal.println(VERBOSE, "Removing: " + pluginBinDir);
|
||||
terminal.println(VERBOSE, "removing [" + pluginBinDir + "]");
|
||||
}
|
||||
|
||||
terminal.println(VERBOSE, "Removing: " + pluginDir);
|
||||
terminal.println(VERBOSE, "removing [" + pluginDir + "]");
|
||||
final Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
try {
Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
} catch (final AtomicMoveNotSupportedException e) {
// this can happen on a union filesystem when a plugin is not installed on the top layer; we fall back to a non-atomic move
/*
* On a union file system if the plugin that we are removing is not installed on the
* top layer then atomic move will not be supported. In this case, we fall back to a
* non-atomic move.
*/
Files.move(pluginDir, tmpPluginDir);
}
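The expanded comment explains the fallback: on a union filesystem, moving a plugin that does not live on the top layer makes the ATOMIC_MOVE option throw AtomicMoveNotSupportedException, and the command then retries as a plain move. A minimal sketch of that try-atomic-then-fall-back pattern against a throwaway temp directory (paths are hypothetical):

```java
import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicMoveFallbackSketch {
    /** Moves {@code source} to {@code destination}, preferring an atomic move. */
    static void moveWithFallback(Path source, Path destination) throws IOException {
        try {
            Files.move(source, destination, StandardCopyOption.ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException e) {
            // e.g. a union/overlay mount where source and destination end up on
            // different layers; fall back to a non-atomic move.
            Files.move(source, destination);
        }
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("plugin-remove-demo");
        Path plugin = Files.createDirectory(dir.resolve("example-plugin"));
        moveWithFallback(plugin, dir.resolve(".removing-example-plugin"));
        System.out.println(Files.exists(dir.resolve(".removing-example-plugin"))); // true
    }
}
```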
|
||||
pluginPaths.add(tmpPluginDir);
|
||||
|
||||
IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()]));
|
||||
|
||||
// we preserve the config files in case the user is upgrading the plugin, but we print
|
||||
// a message so the user knows in case they want to remove manually
|
||||
/*
|
||||
* We preserve the config files in case the user is upgrading the plugin, but we print a
|
||||
* message so the user knows in case they want to remove manually.
|
||||
*/
|
||||
final Path pluginConfigDir = env.configFile().resolve(pluginName);
|
||||
if (Files.exists(pluginConfigDir)) {
|
||||
terminal.println(
|
||||
"-> Preserving plugin config files [" + pluginConfigDir + "] in case of upgrade, delete manually if not needed");
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"-> preserving plugin config files [%s] in case of upgrade; "
|
||||
+ "delete manually if not needed",
|
||||
pluginConfigDir);
|
||||
terminal.println(message);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,7 +81,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
|
||||
if (Fields.QUERY.match(entry.getKey())) {
|
||||
clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache()));
|
||||
}
|
||||
if (Fields.REQUEST_CACHE.match(entry.getKey())) {
|
||||
if (Fields.REQUEST.match(entry.getKey())) {
|
||||
clearIndicesCacheRequest.requestCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.requestCache()));
|
||||
}
|
||||
if (Fields.FIELD_DATA.match(entry.getKey())) {
|
||||
@ -100,7 +100,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
|
||||
|
||||
public static class Fields {
|
||||
public static final ParseField QUERY = new ParseField("query", "filter", "filter_cache");
|
||||
public static final ParseField REQUEST_CACHE = new ParseField("request_cache");
|
||||
public static final ParseField REQUEST = new ParseField("request", "request_cache");
|
||||
public static final ParseField FIELD_DATA = new ParseField("field_data", "fielddata");
|
||||
public static final ParseField RECYCLER = new ParseField("recycler");
|
||||
public static final ParseField FIELDS = new ParseField("fields");
|
||||
|
@ -62,6 +62,7 @@ public class RestValidateQueryAction extends BaseRestHandler {
|
||||
validateQueryRequest.explain(request.paramAsBoolean("explain", false));
|
||||
validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
|
||||
validateQueryRequest.rewrite(request.paramAsBoolean("rewrite", false));
|
||||
validateQueryRequest.allShards(request.paramAsBoolean("all_shards", false));
|
||||
|
||||
Exception bodyParsingException = null;
|
||||
try {
|
||||
@ -98,6 +99,9 @@ public class RestValidateQueryAction extends BaseRestHandler {
|
||||
if (explanation.getIndex() != null) {
|
||||
builder.field(INDEX_FIELD, explanation.getIndex());
|
||||
}
|
||||
if(explanation.getShard() >= 0) {
|
||||
builder.field(SHARD_FIELD, explanation.getShard());
|
||||
}
|
||||
builder.field(VALID_FIELD, explanation.isValid());
|
||||
if (explanation.getError() != null) {
|
||||
builder.field(ERROR_FIELD, explanation.getError());
|
||||
@ -132,6 +136,7 @@ public class RestValidateQueryAction extends BaseRestHandler {
|
||||
}
|
||||
|
||||
private static final String INDEX_FIELD = "index";
|
||||
private static final String SHARD_FIELD = "shard";
|
||||
private static final String VALID_FIELD = "valid";
|
||||
private static final String EXPLANATIONS_FIELD = "explanations";
|
||||
private static final String ERROR_FIELD = "error";
|
||||
|
@ -185,7 +185,7 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na
|
||||
builder.startObject(getName());
|
||||
}
|
||||
if (this.metaData != null) {
|
||||
builder.field(CommonFields.META);
|
||||
builder.field(CommonFields.META.getPreferredName());
|
||||
builder.map(this.metaData);
|
||||
}
|
||||
doXContentBody(builder, params);
|
||||
@ -240,18 +240,17 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na
|
||||
* Common xcontent fields that are shared among addAggregation
|
||||
*/
|
||||
public static final class CommonFields extends ParseField.CommonFields {
|
||||
// todo convert these to ParseField
|
||||
public static final String META = "meta";
|
||||
public static final String BUCKETS = "buckets";
|
||||
public static final String VALUE = "value";
|
||||
public static final String VALUES = "values";
|
||||
public static final String VALUE_AS_STRING = "value_as_string";
|
||||
public static final String DOC_COUNT = "doc_count";
|
||||
public static final String KEY = "key";
|
||||
public static final String KEY_AS_STRING = "key_as_string";
|
||||
public static final String FROM = "from";
|
||||
public static final String FROM_AS_STRING = "from_as_string";
|
||||
public static final String TO = "to";
|
||||
public static final String TO_AS_STRING = "to_as_string";
|
||||
public static final ParseField META = new ParseField("meta");
|
||||
public static final ParseField BUCKETS = new ParseField("buckets");
|
||||
public static final ParseField VALUE = new ParseField("value");
|
||||
public static final ParseField VALUES = new ParseField("values");
|
||||
public static final ParseField VALUE_AS_STRING = new ParseField("value_as_string");
|
||||
public static final ParseField DOC_COUNT = new ParseField("doc_count");
|
||||
public static final ParseField KEY = new ParseField("key");
|
||||
public static final ParseField KEY_AS_STRING = new ParseField("key_as_string");
|
||||
public static final ParseField FROM = new ParseField("from");
|
||||
public static final ParseField FROM_AS_STRING = new ParseField("from_as_string");
|
||||
public static final ParseField TO = new ParseField("to");
|
||||
public static final ParseField TO_AS_STRING = new ParseField("to_as_string");
|
||||
}
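This hunk resolves the "// todo convert these to ParseField" note: each response key becomes a ParseField, so serialization goes through getPreferredName() (as the many .getPreferredName() call sites below show) and parsing can also accept deprecated spellings, like the "request"/"request_cache" pair elsewhere in this commit. A small sketch of that behaviour, assuming the Elasticsearch core ParseField class is on the classpath (the field chosen here is just an example):

```java
import org.elasticsearch.common.ParseField;

public class ParseFieldSketch {
    // Preferred name first, deprecated spellings after it.
    static final ParseField REQUEST = new ParseField("request", "request_cache");

    public static void main(String[] args) {
        // Responses are always written with the preferred name...
        System.out.println(REQUEST.getPreferredName()); // request
        // ...while match() still accepts the deprecated form on the way in.
        System.out.println(REQUEST.match("request_cache")); // true
        System.out.println(REQUEST.match("query_cache"));   // false
    }
}
```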
|
||||
}
|
||||
|
core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java
@ -131,7 +131,7 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
return builder;
|
||||
}
|
||||
|
@ -105,8 +105,8 @@ public class InternalAdjacencyMatrix
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, key);
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), key);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -207,7 +207,7 @@ public class InternalAdjacencyMatrix
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
for (InternalBucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
|
@ -108,7 +108,7 @@ public class InternalFilters extends InternalMultiBucketAggregation<InternalFilt
|
||||
} else {
|
||||
builder.startObject();
|
||||
}
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -210,9 +210,9 @@ public class InternalFilters extends InternalMultiBucketAggregation<InternalFilt
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.BUCKETS);
|
||||
builder.startObject(CommonFields.BUCKETS.getPreferredName());
|
||||
} else {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
}
|
||||
for (InternalBucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
|
@ -120,8 +120,8 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, getKeyAsString());
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString());
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -223,7 +223,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
|
core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
@ -142,10 +142,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
|
||||
builder.startObject();
|
||||
}
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.KEY_AS_STRING, keyAsString);
|
||||
builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString);
|
||||
}
|
||||
builder.field(CommonFields.KEY, key);
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), key);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -437,9 +437,9 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.BUCKETS);
|
||||
builder.startObject(CommonFields.BUCKETS.getPreferredName());
|
||||
} else {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
}
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
|
@ -138,10 +138,10 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
|
||||
builder.startObject();
|
||||
}
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.KEY_AS_STRING, keyAsString);
|
||||
builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString);
|
||||
}
|
||||
builder.field(CommonFields.KEY, key);
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), key);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -421,9 +421,9 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.BUCKETS);
|
||||
builder.startObject(CommonFields.BUCKETS.getPreferredName());
|
||||
} else {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
}
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
|
@ -133,16 +133,16 @@ public final class InternalBinaryRange
|
||||
} else {
|
||||
builder.startObject();
|
||||
if (key != null) {
|
||||
builder.field(CommonFields.KEY, key);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), key);
|
||||
}
|
||||
}
|
||||
if (from != null) {
|
||||
builder.field(CommonFields.FROM, getFrom());
|
||||
builder.field(CommonFields.FROM.getPreferredName(), getFrom());
|
||||
}
|
||||
if (to != null) {
|
||||
builder.field(CommonFields.TO, getTo());
|
||||
builder.field(CommonFields.TO.getPreferredName(), getTo());
|
||||
}
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -270,9 +270,9 @@ public final class InternalBinaryRange
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder,
|
||||
Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.BUCKETS);
|
||||
builder.startObject(CommonFields.BUCKETS.getPreferredName());
|
||||
} else {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
}
|
||||
for (Bucket range : buckets) {
|
||||
range.toXContent(builder, params);
|
||||
|
@ -141,21 +141,21 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
||||
builder.startObject(key);
|
||||
} else {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, key);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), key);
|
||||
}
|
||||
if (!Double.isInfinite(from)) {
|
||||
builder.field(CommonFields.FROM, from);
|
||||
builder.field(CommonFields.FROM.getPreferredName(), from);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.FROM_AS_STRING, format.format(from));
|
||||
builder.field(CommonFields.FROM_AS_STRING.getPreferredName(), format.format(from));
|
||||
}
|
||||
}
|
||||
if (!Double.isInfinite(to)) {
|
||||
builder.field(CommonFields.TO, to);
|
||||
builder.field(CommonFields.TO.getPreferredName(), to);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.TO_AS_STRING, format.format(to));
|
||||
builder.field(CommonFields.TO_AS_STRING.getPreferredName(), format.format(to));
|
||||
}
|
||||
}
|
||||
builder.field(CommonFields.DOC_COUNT, docCount);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
@ -302,9 +302,9 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.BUCKETS);
|
||||
builder.startObject(CommonFields.BUCKETS.getPreferredName());
|
||||
} else {
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
}
|
||||
for (B range : ranges) {
|
||||
range.toXContent(builder, params);
|
||||
|
@ -59,7 +59,7 @@ public class UnmappedSampler extends InternalSampler {
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(InternalAggregation.CommonFields.DOC_COUNT, 0);
|
||||
builder.field(InternalAggregation.CommonFields.DOC_COUNT.getPreferredName(), 0);
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
@ -99,11 +99,11 @@ public class SignificantLongTerms extends InternalMappedSignificantTerms<Signifi
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, term);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), term);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.KEY_AS_STRING, format.format(term));
|
||||
builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));
|
||||
}
|
||||
builder.field(CommonFields.DOC_COUNT, getDocCount());
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
|
||||
builder.field("score", score);
|
||||
builder.field("bg_count", supersetDf);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
@ -162,7 +162,7 @@ public class SignificantLongTerms extends InternalMappedSignificantTerms<Signifi
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field("doc_count", subsetSize);
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
|
@ -104,8 +104,8 @@ public class SignificantStringTerms extends InternalMappedSignificantTerms<Signi
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, getKeyAsString());
|
||||
builder.field(CommonFields.DOC_COUNT, getDocCount());
|
||||
builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString());
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
|
||||
builder.field("score", score);
|
||||
builder.field("bg_count", supersetDf);
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
@ -164,7 +164,7 @@ public class SignificantStringTerms extends InternalMappedSignificantTerms<Signi
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field("doc_count", subsetSize);
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
for (Bucket bucket : buckets) {
|
||||
//There is a condition (presumably when only one shard has a bucket?) where reduce is not called
|
||||
// and I end up with buckets that contravene the user's min_doc_count criteria in my reducer
|
||||
|
@ -107,7 +107,7 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms<UnmappedS
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(CommonFields.BUCKETS).endArray();
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName()).endArray();
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
@ -86,18 +86,11 @@ public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bu
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, term);
|
||||
protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
|
||||
builder.field(CommonFields.KEY.getPreferredName(), term);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.KEY_AS_STRING, format.format(term));
|
||||
builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));
|
||||
}
|
||||
builder.field(CommonFields.DOC_COUNT, getDocCount());
|
||||
if (showDocCountError) {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError());
|
||||
}
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@ -149,18 +142,6 @@ public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bu
|
||||
shardSize, showTermDocCountError, otherDocCount, buckets, docCountError);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError);
|
||||
builder.field(SUM_OF_OTHER_DOC_COUNTS, otherDocCount);
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Bucket[] createBucketsArray(int size) {
|
||||
return new Bucket[size];
|
||||
@ -171,7 +152,7 @@ public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bu
|
||||
boolean promoteToDouble = false;
|
||||
for (InternalAggregation agg : aggregations) {
|
||||
if (agg instanceof LongTerms && ((LongTerms) agg).format == DocValueFormat.RAW) {
|
||||
/**
|
||||
/*
|
||||
* this terms agg mixes longs and doubles, we must promote longs to doubles to make the internal aggs
|
||||
* compatible
|
||||
*/
|
||||
|
@ -246,7 +246,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.terms;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
|
||||
@ -127,4 +128,9 @@ public abstract class InternalMappedTerms<A extends InternalTerms<A, B>, B exten
|
||||
protected int doHashCode() {
|
||||
return Objects.hash(super.doHashCode(), buckets, format, otherDocCount, showTermDocCountError, shardSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
return doXContentCommon(builder, params, docCountError, otherDocCount, buckets);
|
||||
}
|
||||
}
|
||||
|
@ -18,9 +18,11 @@
|
||||
*/
|
||||
package org.elasticsearch.search.aggregations.bucket.terms;
|
||||
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.AggregationExecutionException;
|
||||
import org.elasticsearch.search.aggregations.Aggregations;
|
||||
@ -43,10 +45,11 @@ import static java.util.Collections.unmodifiableList;
|
||||
public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends InternalTerms.Bucket<B>>
|
||||
extends InternalMultiBucketAggregation<A, B> implements Terms, ToXContent {
|
||||
|
||||
protected static final String DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = "doc_count_error_upper_bound";
|
||||
protected static final String SUM_OF_OTHER_DOC_COUNTS = "sum_other_doc_count";
|
||||
protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
|
||||
protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
|
||||
|
||||
public abstract static class Bucket<B extends Bucket<B>> extends Terms.Bucket {
|
||||
|
||||
/**
|
||||
* Reads a bucket. Should be a constructor reference.
|
||||
*/
|
||||
@ -141,6 +144,21 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
|
||||
return newBucket(docCount, aggs, docCountError);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
keyToXContent(builder);
|
||||
builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
|
||||
if (showDocCountError) {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
|
||||
}
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException;
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || getClass() != obj.getClass()) {
|
||||
@ -319,4 +337,16 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
|
||||
protected int doHashCode() {
|
||||
return Objects.hash(minDocCount, order, requiredSize);
|
||||
}
|
||||
|
||||
protected static XContentBuilder doXContentCommon(XContentBuilder builder, Params params,
|
||||
long docCountError, long otherDocCount, List<? extends Bucket> buckets) throws IOException {
|
||||
builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), docCountError);
|
||||
builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount);
|
||||
builder.startArray(CommonFields.BUCKETS.getPreferredName());
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
@ -86,18 +86,11 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket>
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, term);
|
||||
protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
|
||||
builder.field(CommonFields.KEY.getPreferredName(), term);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.KEY_AS_STRING, format.format(term));
|
||||
builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));
|
||||
}
|
||||
builder.field(CommonFields.DOC_COUNT, getDocCount());
|
||||
if (showDocCountError) {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError());
|
||||
}
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@ -149,18 +142,6 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket>
|
||||
showTermDocCountError, otherDocCount, buckets, docCountError);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError);
|
||||
builder.field(SUM_OF_OTHER_DOC_COUNTS, otherDocCount);
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Bucket[] createBucketsArray(int size) {
|
||||
return new Bucket[size];
|
||||
|
@ -85,16 +85,8 @@ public class StringTerms extends InternalMappedTerms<StringTerms, StringTerms.Bu
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, getKeyAsString());
|
||||
builder.field(CommonFields.DOC_COUNT, getDocCount());
|
||||
if (showDocCountError) {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError());
|
||||
}
|
||||
aggregations.toXContentInternal(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
|
||||
return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -145,18 +137,6 @@ public class StringTerms extends InternalMappedTerms<StringTerms, StringTerms.Bu
|
||||
showTermDocCountError, otherDocCount, buckets, docCountError);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError);
|
||||
builder.field(SUM_OF_OTHER_DOC_COUNTS, otherDocCount);
|
||||
builder.startArray(CommonFields.BUCKETS);
|
||||
for (Bucket bucket : buckets) {
|
||||
bucket.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Bucket[] createBucketsArray(int size) {
|
||||
return new Bucket[size];
|
||||
|
@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@ -102,11 +103,8 @@ public class UnmappedTerms extends InternalTerms<UnmappedTerms, UnmappedTerms.Bu
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, 0);
|
||||
builder.field(SUM_OF_OTHER_DOC_COUNTS, 0);
|
||||
builder.startArray(CommonFields.BUCKETS).endArray();
|
||||
return builder;
|
||||
public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
return doXContentCommon(builder, params, 0, 0, Collections.emptyList());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -96,9 +96,9 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(CommonFields.VALUE, count != 0 ? getValue() : null);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), count != 0 ? getValue() : null);
|
||||
if (count != 0 && format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(getValue()));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(getValue()));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
@ -115,5 +115,4 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i
|
||||
Objects.equals(count, other.count) &&
|
||||
Objects.equals(format.getWriteableName(), other.format.getWriteableName());
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -109,7 +109,7 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
final long cardinality = getValue();
|
||||
builder.field(CommonFields.VALUE, cardinality);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), cardinality);
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
@ -83,9 +83,9 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
boolean hasValue = !Double.isInfinite(max);
|
||||
builder.field(CommonFields.VALUE, hasValue ? max : null);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? max : null);
|
||||
if (hasValue && format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(max));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(max));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
@ -83,9 +83,9 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
boolean hasValue = !Double.isInfinite(min);
|
||||
builder.field(CommonFields.VALUE, hasValue ? min : null);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? min : null);
|
||||
if (hasValue && format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(min));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(min));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ abstract class AbstractInternalHDRPercentiles extends InternalNumericMetricsAggr
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.VALUES);
|
||||
builder.startObject(CommonFields.VALUES.getPreferredName());
|
||||
for(int i = 0; i < keys.length; ++i) {
|
||||
String key = String.valueOf(keys[i]);
|
||||
double value = value(keys[i]);
|
||||
@ -126,14 +126,14 @@ abstract class AbstractInternalHDRPercentiles extends InternalNumericMetricsAggr
|
||||
}
|
||||
builder.endObject();
|
||||
} else {
|
||||
builder.startArray(CommonFields.VALUES);
|
||||
builder.startArray(CommonFields.VALUES.getPreferredName());
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
double value = value(keys[i]);
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, keys[i]);
|
||||
builder.field(CommonFields.VALUE, value);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), keys[i]);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), value);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(value));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ abstract class AbstractInternalTDigestPercentiles extends InternalNumericMetrics
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
if (keyed) {
|
||||
builder.startObject(CommonFields.VALUES);
|
||||
builder.startObject(CommonFields.VALUES.getPreferredName());
|
||||
for(int i = 0; i < keys.length; ++i) {
|
||||
String key = String.valueOf(keys[i]);
|
||||
double value = value(keys[i]);
|
||||
@ -109,14 +109,14 @@ abstract class AbstractInternalTDigestPercentiles extends InternalNumericMetrics
|
||||
}
|
||||
builder.endObject();
|
||||
} else {
|
||||
builder.startArray(CommonFields.VALUES);
|
||||
builder.startArray(CommonFields.VALUES.getPreferredName());
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
double value = value(keys[i]);
|
||||
builder.startObject();
|
||||
builder.field(CommonFields.KEY, keys[i]);
|
||||
builder.field(CommonFields.VALUE, value);
|
||||
builder.field(CommonFields.KEY.getPreferredName(), keys[i]);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), value);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(value));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
@ -82,9 +82,9 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(CommonFields.VALUE, sum);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), sum);
|
||||
if (format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(sum));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
@ -81,7 +81,7 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.Single
|
||||
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field(CommonFields.VALUE, value);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), value);
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
@ -79,9 +79,9 @@ public class InternalSimpleValue extends InternalNumericMetricsAggregation.Singl
|
||||
@Override
|
||||
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
|
||||
boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value));
|
||||
builder.field(CommonFields.VALUE, hasValue ? value : null);
|
||||
builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null);
|
||||
if (hasValue && format != DocValueFormat.RAW) {
|
||||
builder.field(CommonFields.VALUE_AS_STRING, format.format(value));
|
||||
builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
@@ -97,9 +97,9 @@ public class InternalBucketMetricValue extends InternalNumericMetricsAggregation
    @Override
    public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
        boolean hasValue = !Double.isInfinite(value);
        builder.field(CommonFields.VALUE, hasValue ? value : null);
        builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null);
        if (hasValue && format != DocValueFormat.RAW) {
            builder.field(CommonFields.VALUE_AS_STRING, format.format(value));
            builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value));
        }
        builder.startArray("keys");
        for (String key : keys) {
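All of these doXContentBody implementations follow the same convention: always emit the numeric value, and add a *_as_string sibling only when a non-default format is attached to the aggregation. A JDK-only sketch of that convention, with DecimalFormat standing in for Elasticsearch's DocValueFormat:

import java.text.DecimalFormat;
import java.util.LinkedHashMap;
import java.util.Map;

public class ValueAsStringSketch {
    public static void main(String[] args) {
        double value = 1234.5678;
        DecimalFormat format = new DecimalFormat("#,##0.00"); // stand-in for a configured DocValueFormat

        Map<String, Object> body = new LinkedHashMap<>();
        body.put("value", value);
        if (format != null) { // the real code checks "format != DocValueFormat.RAW"
            body.put("value_as_string", format.format(value));
        }
        System.out.println(body); // e.g. {value=1234.5678, value_as_string=1,234.57} in an English locale
    }
}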
@@ -52,13 +52,14 @@ import java.util.Map;
public class FastVectorHighlighter implements Highlighter {

    private static final BoundaryScanner DEFAULT_SIMPLE_BOUNDARY_SCANNER = new SimpleBoundaryScanner();
    private static final BoundaryScanner DEFAULT_SENTENCE_BOUNDARY_SCANNER = new BreakIteratorBoundaryScanner(
            BreakIterator.getSentenceInstance(Locale.ROOT));
    private static final BoundaryScanner DEFAULT_WORD_BOUNDARY_SCANNER = new BreakIteratorBoundaryScanner(
            BreakIterator.getWordInstance(Locale.ROOT));
    private static final BoundaryScanner DEFAULT_SENTENCE_BOUNDARY_SCANNER =
        new BreakIteratorBoundaryScanner(BreakIterator.getSentenceInstance(Locale.ROOT));
    private static final BoundaryScanner DEFAULT_WORD_BOUNDARY_SCANNER =
        new BreakIteratorBoundaryScanner(BreakIterator.getWordInstance(Locale.ROOT));

    public static final Setting<Boolean> SETTING_TV_HIGHLIGHT_MULTI_VALUE =
        Setting.boolSetting("search.highlight.term_vector_multi_value", true, Setting.Property.NodeScope);

    public static final Setting<Boolean> SETTING_TV_HIGHLIGHT_MULTI_VALUE = Setting.boolSetting("search.highlight.term_vector_multi_value",
        true, Setting.Property.NodeScope);
    private static final String CACHE_KEY = "highlight-fsv";
    private final Boolean termVectorMultiValue;

@@ -74,11 +75,12 @@ public class FastVectorHighlighter implements Highlighter {
        FieldMapper mapper = highlighterContext.mapper;

        if (canHighlight(mapper) == false) {
            throw new IllegalArgumentException("the field [" + highlighterContext.fieldName
                + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
            throw new IllegalArgumentException("the field [" + highlighterContext.fieldName +
                "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
        }

        Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
        Encoder encoder = field.fieldOptions().encoder().equals("html") ?
            HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;

        if (!hitContext.cache().containsKey(CACHE_KEY)) {
            hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
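SETTING_TV_HIGHLIGHT_MULTI_VALUE above is a node-scoped boolean setting. As a reminder of how such a constant is declared and read, here is a small sketch; the setting key is the one from the hunk, while the Settings contents are made up:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class SettingSketch {
    static final Setting<Boolean> TV_HIGHLIGHT_MULTI_VALUE =
        Setting.boolSetting("search.highlight.term_vector_multi_value", true, Setting.Property.NodeScope);

    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("search.highlight.term_vector_multi_value", false)
            .build();
        // Falls back to the declared default (true) when the key is absent.
        boolean enabled = TV_HIGHLIGHT_MULTI_VALUE.get(settings);
        System.out.println(enabled); // false
    }
}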
@@ -90,21 +92,21 @@ public class FastVectorHighlighter implements Highlighter {
            if (field.fieldOptions().requireFieldMatch()) {
                if (cache.fieldMatchFieldQuery == null) {
                    /*
                     * we use top level reader to rewrite the query against all readers, with use caching it across hits (and across
                     * readers...)
                     * we use top level reader to rewrite the query against all readers,
                     * with use caching it across hits (and across readers...)
                     */
                    cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, hitContext.topLevelReader(),
                        true, field.fieldOptions().requireFieldMatch());
                    cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query,
                        hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
                }
                fieldQuery = cache.fieldMatchFieldQuery;
            } else {
                if (cache.noFieldMatchFieldQuery == null) {
                    /*
                     * we use top level reader to rewrite the query against all readers, with use caching it across hits (and across
                     * readers...)
                     * we use top level reader to rewrite the query against all readers,
                     * with use caching it across hits (and across readers...)
                     */
                    cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, hitContext.topLevelReader(),
                        true, field.fieldOptions().requireFieldMatch());
                    cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query,
                        hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
                }
                fieldQuery = cache.noFieldMatchFieldQuery;
            }
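The block comments in this hunk describe the caching intent: the rewritten CustomFieldQuery is built once per hit context and then reused across hits and readers, with one cached variant per requireFieldMatch value. A compressed, JDK-only sketch of that compute-once shape, with a hypothetical buildFieldQuery() standing in for the CustomFieldQuery construction:

import java.util.HashMap;
import java.util.Map;

public class FieldQueryCacheSketch {
    private final Map<Boolean, Object> cache = new HashMap<>();

    Object fieldQueryFor(boolean requireFieldMatch) {
        // Build the (expensive) rewritten query only on the first hit, then reuse it.
        return cache.computeIfAbsent(requireFieldMatch, this::buildFieldQuery);
    }

    private Object buildFieldQuery(boolean requireFieldMatch) {
        // Hypothetical stand-in for "new CustomFieldQuery(query, topLevelReader, true, requireFieldMatch)".
        return new Object();
    }

    public static void main(String[] args) {
        FieldQueryCacheSketch sketch = new FieldQueryCacheSketch();
        System.out.println(sketch.fieldQueryFor(true) == sketch.fieldQueryFor(true)); // true: cached
    }
}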
@@ -128,7 +130,7 @@ public class FastVectorHighlighter implements Highlighter {
                }
            } else {
                fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ?
                        new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
                    new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
                if (field.fieldOptions().scoreOrdered()) {
                    if (!forceSource && mapper.fieldType().stored()) {
                        fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.fieldOptions().preTags(),
@@ -142,7 +144,8 @@ public class FastVectorHighlighter implements Highlighter {
                        fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(),
                            field.fieldOptions().postTags(), boundaryScanner);
                    } else {
                        fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(),
                        fragmentsBuilder =
                            new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(),
                                field.fieldOptions().postTags(), boundaryScanner);
                    }
                }
@@ -153,8 +156,8 @@ public class FastVectorHighlighter implements Highlighter {
            entry.fragmentsBuilder = fragmentsBuilder;
            if (cache.fvh == null) {
                // parameters to FVH are not requires since:
                // first two booleans are not relevant since they are set on the CustomFieldQuery (phrase and fieldMatch)
                // fragment builders are used explicitly
                // first two booleans are not relevant since they are set on the CustomFieldQuery
                // (phrase and fieldMatch) fragment builders are used explicitly
                cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
            }
            CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter());
@@ -172,13 +175,14 @@ public class FastVectorHighlighter implements Highlighter {
            // we highlight against the low level reader and docId, because if we load source, we want to reuse it if possible
            // Only send matched fields if they were requested to save time.
            if (field.fieldOptions().matchedFields() != null && !field.fieldOptions().matchedFields().isEmpty()) {
                fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.fieldType().name(),
                    field.fieldOptions().matchedFields(), fragmentCharSize, numberOfFragments, entry.fragListBuilder,
                    entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
                fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(),
                    mapper.fieldType().name(), field.fieldOptions().matchedFields(), fragmentCharSize,
                    numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(),
                    field.fieldOptions().postTags(), encoder);
            } else {
                fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), mapper.fieldType().name(),
                    fragmentCharSize, numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(),
                    field.fieldOptions().postTags(), encoder);
                fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(),
                    mapper.fieldType().name(), fragmentCharSize, numberOfFragments, entry.fragListBuilder,
                    entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
            }

            if (fragments != null && fragments.length > 0) {
@@ -187,11 +191,13 @@ public class FastVectorHighlighter implements Highlighter {

            int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
            if (noMatchSize > 0) {
                // Essentially we just request that a fragment is built from 0 to noMatchSize using the normal fragmentsBuilder
                // Essentially we just request that a fragment is built from 0 to noMatchSize using
                // the normal fragmentsBuilder
                FieldFragList fieldFragList = new SimpleFieldFragList(-1 /*ignored*/);
                fieldFragList.add(0, noMatchSize, Collections.<WeightedPhraseInfo>emptyList());
                fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().name(),
                    fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
                fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(),
                    mapper.fieldType().name(), fieldFragList, 1, field.fieldOptions().preTags(),
                    field.fieldOptions().postTags(), encoder);
                if (fragments != null && fragments.length > 0) {
                    return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
                }
@@ -200,7 +206,8 @@ public class FastVectorHighlighter implements Highlighter {
            return null;

        } catch (Exception e) {
            throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
            throw new FetchPhaseExecutionException(context,
                "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
        }
    }

@@ -212,24 +219,31 @@ public class FastVectorHighlighter implements Highlighter {

    private static BoundaryScanner getBoundaryScanner(Field field) {
        final FieldOptions fieldOptions = field.fieldOptions();
        final Locale boundaryScannerLocale = fieldOptions.boundaryScannerLocale();
        switch(fieldOptions.boundaryScannerType()) {
        case SENTENCE:
            if (boundaryScannerLocale != null) {
                return new BreakIteratorBoundaryScanner(BreakIterator.getSentenceInstance(boundaryScannerLocale));
            }
            return DEFAULT_SENTENCE_BOUNDARY_SCANNER;
        case WORD:
            if (boundaryScannerLocale != null) {
                return new BreakIteratorBoundaryScanner(BreakIterator.getWordInstance(boundaryScannerLocale));
            }
            return DEFAULT_WORD_BOUNDARY_SCANNER;
        default:
            if (fieldOptions.boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN
        final Locale boundaryScannerLocale =
            fieldOptions.boundaryScannerLocale() != null ? fieldOptions.boundaryScannerLocale() :
                Locale.ROOT;
        final HighlightBuilder.BoundaryScannerType type =
            fieldOptions.boundaryScannerType() != null ? fieldOptions.boundaryScannerType() :
                HighlightBuilder.BoundaryScannerType.CHARS;
        switch(type) {
            case SENTENCE:
                if (boundaryScannerLocale != null) {
                    return new BreakIteratorBoundaryScanner(BreakIterator.getSentenceInstance(boundaryScannerLocale));
                }
                return DEFAULT_SENTENCE_BOUNDARY_SCANNER;
            case WORD:
                if (boundaryScannerLocale != null) {
                    return new BreakIteratorBoundaryScanner(BreakIterator.getWordInstance(boundaryScannerLocale));
                }
                return DEFAULT_WORD_BOUNDARY_SCANNER;
            case CHARS:
                if (fieldOptions.boundaryMaxScan() != SimpleBoundaryScanner.DEFAULT_MAX_SCAN
                    || fieldOptions.boundaryChars() != SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) {
                return new SimpleBoundaryScanner(fieldOptions.boundaryMaxScan(), fieldOptions.boundaryChars());
            }
            return DEFAULT_SIMPLE_BOUNDARY_SCANNER;
                    return new SimpleBoundaryScanner(fieldOptions.boundaryMaxScan(), fieldOptions.boundaryChars());
                }
                return DEFAULT_SIMPLE_BOUNDARY_SCANNER;
            default:
                throw new IllegalArgumentException("Invalid boundary scanner type: " + type.toString());
        }
    }

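The rewritten getBoundaryScanner defaults the locale to Locale.ROOT and the scanner type to CHARS before switching on them, rather than switching on possibly-null options. The locale and type selection can be exercised with nothing but the JDK; the type strings and sample text below are illustrative only:

import java.text.BreakIterator;
import java.util.Locale;

public class BoundaryScannerSketch {
    // Mirrors the SENTENCE/WORD branches above: pick a BreakIterator for the
    // requested locale, falling back to Locale.ROOT when none is configured.
    static BreakIterator forType(String type, Locale configuredLocale) {
        Locale locale = configuredLocale != null ? configuredLocale : Locale.ROOT;
        switch (type) {
            case "sentence":
                return BreakIterator.getSentenceInstance(locale);
            case "word":
                return BreakIterator.getWordInstance(locale);
            default:
                throw new IllegalArgumentException("Invalid boundary scanner type: " + type);
        }
    }

    public static void main(String[] args) {
        String text = "First sentence. Second sentence.";
        BreakIterator bi = forType("sentence", null);
        bi.setText(text);
        int start = bi.first();
        for (int end = bi.next(); end != BreakIterator.DONE; start = end, end = bi.next()) {
            System.out.println("[" + text.substring(start, end) + "]");
        }
    }
}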
@@ -95,7 +95,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
            .preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(DEFAULT_SCORE_ORDERED)
            .highlightFilter(DEFAULT_HIGHLIGHT_FILTER).requireFieldMatch(DEFAULT_REQUIRE_FIELD_MATCH)
            .forceSource(DEFAULT_FORCE_SOURCE).fragmentCharSize(DEFAULT_FRAGMENT_CHAR_SIZE)
            .numberOfFragments(DEFAULT_NUMBER_OF_FRAGMENTS).encoder(DEFAULT_ENCODER).boundaryScannerType(BoundaryScannerType.CHARS)
            .numberOfFragments(DEFAULT_NUMBER_OF_FRAGMENTS).encoder(DEFAULT_ENCODER)
            .boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN).boundaryChars(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS)
            .boundaryScannerLocale(Locale.ROOT).noMatchSize(DEFAULT_NO_MATCH_SIZE).phraseLimit(DEFAULT_PHRASE_LIMIT).build();
@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.highlight.Snippet;
import org.apache.lucene.search.uhighlight.BoundedBreakIteratorScanner;
import org.apache.lucene.search.uhighlight.CustomPassageFormatter;
import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter;
import org.apache.lucene.util.BytesRef;
@@ -34,12 +35,15 @@ import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.text.BreakIterator;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Collectors;

import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
import static org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter.filterSnippets;
import static org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter.mergeFieldValues;

@@ -93,19 +97,22 @@ public class UnifiedHighlighter implements Highlighter {
                    // we use a control char to separate values, which is the only char that the custom break iterator
                    // breaks the text on, so we don't lose the distinction between the different values of a field and we
                    // get back a snippet per value
                    String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.NULL_SEPARATOR);
                    String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
                    org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator breakIterator =
                        new org.apache.lucene.search.postingshighlight
                            .CustomSeparatorBreakIterator(HighlightUtils.NULL_SEPARATOR);
                            .CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
                    highlighter =
                        new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter,
                            breakIterator, fieldValue, field.fieldOptions().noMatchSize() > 0);
                            field.fieldOptions().boundaryScannerLocale(), breakIterator, fieldValue,
                            field.fieldOptions().noMatchSize());
                    numberOfFragments = fieldValues.size(); // we are highlighting the whole content, one snippet per value
                } else {
                    //using paragraph separator we make sure that each field value holds a discrete passage for highlighting
                    String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.PARAGRAPH_SEPARATOR);
                    String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
                    BreakIterator bi = getBreakIterator(field);
                    highlighter = new CustomUnifiedHighlighter(searcher, analyzer,
                        mapperHighlighterEntry.passageFormatter, null, fieldValue, field.fieldOptions().noMatchSize() > 0);
                        mapperHighlighterEntry.passageFormatter, field.fieldOptions().boundaryScannerLocale(), bi,
                        fieldValue, field.fieldOptions().noMatchSize());
                    numberOfFragments = field.fieldOptions().numberOfFragments();
                }
                if (field.fieldOptions().requireFieldMatch()) {
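Both branches now merge the individual values of a field into one string around MULTIVAL_SEP_CHAR before highlighting, so a single highlighting pass can still hand back one snippet per value. A JDK-only sketch of that merge/split round trip; the separator used here ('\u0000') is an assumption for illustration, not a quote of the Lucene constant:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class MultiValueMergeSketch {
    public static void main(String[] args) {
        char sep = '\u0000'; // assumed stand-in for CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR
        List<String> fieldValues = Arrays.asList("first value", "second value", "third value");

        // Merge all values into a single text, as mergeFieldValues(...) does.
        String merged = fieldValues.stream().collect(Collectors.joining(String.valueOf(sep)));

        // A separator-aware break iterator can later split snippets back out per value.
        String[] perValue = merged.split(String.valueOf(sep));
        System.out.println(perValue.length); // 3, one candidate snippet region per value
    }
}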
@@ -144,11 +151,34 @@ public class UnifiedHighlighter implements Highlighter {
        return null;
    }

    static class HighlighterEntry {
    private BreakIterator getBreakIterator(SearchContextHighlight.Field field) {
        final SearchContextHighlight.FieldOptions fieldOptions = field.fieldOptions();
        final Locale locale =
            fieldOptions.boundaryScannerLocale() != null ? fieldOptions.boundaryScannerLocale() :
                Locale.ROOT;
        final HighlightBuilder.BoundaryScannerType type =
            fieldOptions.boundaryScannerType() != null ? fieldOptions.boundaryScannerType() :
                HighlightBuilder.BoundaryScannerType.SENTENCE;
        int maxLen = fieldOptions.fragmentCharSize();
        switch (type) {
            case SENTENCE:
                if (maxLen > 0) {
                    return BoundedBreakIteratorScanner.getSentence(locale, maxLen);
                }
                return BreakIterator.getSentenceInstance(locale);
            case WORD:
                // ignore maxLen
                return BreakIterator.getWordInstance(locale);
            default:
                throw new IllegalArgumentException("Invalid boundary scanner type: " + type.toString());
        }
    }

    private static class HighlighterEntry {
        Map<FieldMapper, MapperHighlighterEntry> mappers = new HashMap<>();
    }

    static class MapperHighlighterEntry {
    private static class MapperHighlighterEntry {
        final CustomPassageFormatter passageFormatter;

        private MapperHighlighterEntry(CustomPassageFormatter passageFormatter) {
@@ -19,6 +19,7 @@
package org.elasticsearch.search.suggest.completion;

import org.apache.lucene.search.suggest.document.CompletionQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.mapper.CompletionFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
@@ -77,15 +78,7 @@ public class CompletionSuggestionContext extends SuggestionSearchContext.Suggest
        CompletionFieldMapper.CompletionFieldType fieldType = getFieldType();
        final CompletionQuery query;
        if (getPrefix() != null) {
            if (fuzzyOptions != null) {
                query = fieldType.fuzzyQuery(getPrefix().utf8ToString(),
                    Fuzziness.fromEdits(fuzzyOptions.getEditDistance()),
                    fuzzyOptions.getFuzzyPrefixLength(), fuzzyOptions.getFuzzyMinLength(),
                    fuzzyOptions.getMaxDeterminizedStates(), fuzzyOptions.isTranspositions(),
                    fuzzyOptions.isUnicodeAware());
            } else {
                query = fieldType.prefixQuery(getPrefix());
            }
            query = createCompletionQuery(getPrefix(), fieldType);
        } else if (getRegex() != null) {
            if (fuzzyOptions != null) {
                throw new IllegalArgumentException("can not use 'fuzzy' options with 'regex");
@@ -95,8 +88,10 @@ public class CompletionSuggestionContext extends SuggestionSearchContext.Suggest
            }
            query = fieldType.regexpQuery(getRegex(), regexOptions.getFlagsValue(),
                regexOptions.getMaxDeterminizedStates());
        } else if (getText() != null) {
            query = createCompletionQuery(getText(), fieldType);
        } else {
            throw new IllegalArgumentException("'prefix' or 'regex' must be defined");
            throw new IllegalArgumentException("'prefix/text' or 'regex' must be defined");
        }
        if (fieldType.hasContextMappings()) {
            ContextMappings contextMappings = fieldType.getContextMappings();
@@ -105,4 +100,18 @@ public class CompletionSuggestionContext extends SuggestionSearchContext.Suggest
        return query;
    }

    private CompletionQuery createCompletionQuery(BytesRef prefix, CompletionFieldMapper.CompletionFieldType fieldType) {
        final CompletionQuery query;
        if (fuzzyOptions != null) {
            query = fieldType.fuzzyQuery(prefix.utf8ToString(),
                Fuzziness.fromEdits(fuzzyOptions.getEditDistance()),
                fuzzyOptions.getFuzzyPrefixLength(), fuzzyOptions.getFuzzyMinLength(),
                fuzzyOptions.getMaxDeterminizedStates(), fuzzyOptions.isTranspositions(),
                fuzzyOptions.isUnicodeAware());
        } else {
            query = fieldType.prefixQuery(prefix);
        }
        return query;
    }

}
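The refactoring above routes both 'prefix' and plain 'text' input through the same createCompletionQuery helper, so fuzzy options apply to either, while 'regex' keeps its own branch. From the client side the three inputs look like this; the sketch assumes the 5.x-era SuggestBuilders API and uses a hypothetical completion field name:

import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;

public class CompletionSuggestSketch {
    public static void main(String[] args) {
        // "title_suggest" is a hypothetical completion field.
        CompletionSuggestionBuilder byPrefix = SuggestBuilders.completionSuggestion("title_suggest")
            .prefix("elast", Fuzziness.ONE);   // fuzzy prefix, handled by the fuzzyQuery branch
        CompletionSuggestionBuilder byText = SuggestBuilders.completionSuggestion("title_suggest")
            .text("elast");                    // now treated the same way as prefix
        CompletionSuggestionBuilder byRegex = SuggestBuilders.completionSuggestion("title_suggest")
            .regex("ela.*");                   // regex input rejects fuzzy options

        SearchSourceBuilder source = new SearchSourceBuilder()
            .suggest(new SuggestBuilder().addSuggestion("by-prefix", byPrefix));
        System.out.println(source);
    }
}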
@@ -1150,11 +1150,24 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
            @Override
            public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) {
                if (failedSnapshot.equals(snapshot)) {
                    logger.trace("deleted snapshot failed - deleting files", e);
                    logger.warn("deleted snapshot failed - deleting files", e);
                    removeListener(this);
                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() ->
                        deleteSnapshot(failedSnapshot.getRepository(), failedSnapshot.getSnapshotId().getName(), listener, true)
                    );
                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
                        try {
                            deleteSnapshot(failedSnapshot.getRepository(),
                                failedSnapshot.getSnapshotId().getName(),
                                listener,
                                true);
                        } catch (SnapshotMissingException smex) {
                            logger.info((Supplier<?>) () -> new ParameterizedMessage(
                                "Tried deleting in-progress snapshot [{}], but it " +
                                "could not be found after failing to abort.",
                                smex.getSnapshotName()), e);
                            listener.onFailure(new SnapshotException(snapshot,
                                "Tried deleting in-progress snapshot [{}], but it " +
                                "could not be found after failing to abort.", smex));
                        }
                    });
                }
            }
        });
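The new branch runs the follow-up delete on the snapshot thread pool and, if the snapshot turns out to be gone after the failed abort, reports that to the listener instead of letting the exception escape the pool thread. A stripped-down, JDK-only sketch of that shape, with the listener and exception types invented for illustration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SnapshotCleanupSketch {
    // Hypothetical stand-ins for ActionListener and SnapshotMissingException.
    interface Listener { void onResponse(String msg); void onFailure(Exception e); }
    static class MissingException extends RuntimeException {
        MissingException(String msg) { super(msg); }
    }

    static void deleteSnapshot(String name) {
        throw new MissingException("snapshot [" + name + "] is missing"); // simulate the race
    }

    public static void main(String[] args) {
        ExecutorService snapshotPool = Executors.newSingleThreadExecutor();
        Listener listener = new Listener() {
            public void onResponse(String msg) { System.out.println("ok: " + msg); }
            public void onFailure(Exception e) { System.out.println("failed: " + e.getMessage()); }
        };
        snapshotPool.execute(() -> {
            try {
                deleteSnapshot("my-snapshot");
                listener.onResponse("deleted");
            } catch (MissingException e) {
                // Mirror the diff: report the condition instead of losing it on the pool thread.
                listener.onFailure(e);
            }
        });
        snapshotPool.shutdown();
    }
}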
@@ -0,0 +1,60 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.tasks;

/**
 * An interface for a request that can be used to register a task manager task
 */
public interface TaskAwareRequest {
    /**
     * Set a reference to task that caused this task to be run.
     */
    default void setParentTask(String parentTaskNode, long parentTaskId) {
        setParentTask(new TaskId(parentTaskNode, parentTaskId));
    }

    /**
     * Set a reference to task that created this request.
     */
    void setParentTask(TaskId taskId);

    /**
     * Get a reference to the task that created this request. Implementers should default to
     * {@link TaskId#EMPTY_TASK_ID}, meaning "there is no parent".
     */
    TaskId getParentTask();

    /**
     * Returns the task object that should be used to keep track of the processing of the request.
     *
     * A request can override this method and return null to avoid being tracked by the task
     * manager.
     */
    default Task createTask(long id, String type, String action, TaskId parentTaskId) {
        return new Task(id, type, action, getDescription(), parentTaskId);
    }

    /**
     * Returns optional description of the request to be displayed by the task manager
     */
    default String getDescription() {
        return "";
    }
}
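Because the interface above ships defaults for everything except the parent-task accessors, a request only has to carry a TaskId. A minimal hypothetical implementer (class name and description text invented for illustration):

import org.elasticsearch.tasks.TaskAwareRequest;
import org.elasticsearch.tasks.TaskId;

// Hypothetical request type; only the parent-task plumbing is required by the interface.
public class PingRequest implements TaskAwareRequest {
    private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;

    @Override
    public void setParentTask(TaskId taskId) {
        this.parentTaskId = taskId;
    }

    @Override
    public TaskId getParentTask() {
        return parentTaskId;
    }

    @Override
    public String getDescription() {
        return "ping"; // surfaced by the task management APIs
    }
}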
@@ -35,18 +35,14 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

@@ -83,7 +79,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplie
     * <p>
     * Returns the task manager tracked task or null if the task doesn't support the task manager
     */
    public Task register(String type, String action, TransportRequest request) {
    public Task register(String type, String action, TaskAwareRequest request) {
        Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask());
        if (task == null) {
            return null;
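With the new signature, anything that implements TaskAwareRequest can be registered with the task manager directly. A hedged usage sketch that reuses the hypothetical PingRequest from above; the action name is made up, and unregister() is assumed to be the matching cleanup call:

import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;

public class PingHandler {
    // "cluster:monitor/ping" is a made-up action name for illustration.
    void handlePing(TaskManager taskManager) {
        Task task = taskManager.register("transport", "cluster:monitor/ping", new PingRequest());
        try {
            // ... perform the work the request describes ...
        } finally {
            if (task != null) {          // register() returns null if createTask() opted out
                taskManager.unregister(task);
            }
        }
    }
}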
Some files were not shown because too many files have changed in this diff.