Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-28 10:58:30 +00:00)

Commit 3f6434cd6d: Merge branch 'master' into ccr

* master:
  Reduce CLI scripts to one-liners (#30759)
  SQL: Preserve scoring in bool queries (#30730)
  QA: Switch rolling upgrade to 3 nodes (#30728)
  [TEST] Enable DEBUG logging on testAutoQueueSizingWithMax
  [ML] Don't install empty ML metadata on startup (#30751)
  Add assertion on removing copy_settings (#30748)
  bump lucene version for 6_3_0
  [DOCS] Mark painless execute api as experimental (#30710)
  disable annotation processor for docs (#30610)
  Add more script contexts (#30721)
  Fix default shards count in create index docs (#30747)
  Mute testCorruptFileThenSnapshotAndRestore
Changed paths:

  buildSrc/src/main/groovy/org/elasticsearch/gradle/test
  distribution/src/bin
  docs
  plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript
  qa/rolling-upgrade
  server/src
    main/java/org/elasticsearch
    test/java/org/elasticsearch
      common/util/concurrent
      index/store
      search/functionscore
  x-pack
    plugin
      core/src/main/java/org/elasticsearch/xpack/core/ml
      ml/src
        main/java/org/elasticsearch/xpack/ml
          MachineLearningFeatureSet.java
          MlAssignmentNotifier.java
          MlInitializationService.java
          action
            TransportCloseJobAction.java
            TransportDeleteDatafeedAction.java
            TransportDeleteFilterAction.java
            TransportDeleteJobAction.java
            TransportFinalizeJobExecutionAction.java
            TransportGetCalendarEventsAction.java
            TransportGetDatafeedsAction.java
            TransportGetDatafeedsStatsAction.java
            TransportGetJobsStatsAction.java
            TransportOpenJobAction.java
            TransportPreviewDatafeedAction.java
            TransportPutCalendarAction.java
            TransportPutDatafeedAction.java
            TransportStartDatafeedAction.java
            TransportStopDatafeedAction.java
            TransportUpdateCalendarJobAction.java
            TransportUpdateDatafeedAction.java
          datafeed
          job
        test/java/org/elasticsearch/xpack/ml
      security/src/main/bin
        elasticsearch-certgen
        elasticsearch-certutil
        elasticsearch-migrate
        elasticsearch-saml-metadata
        elasticsearch-setup-passwords
        elasticsearch-syskeygen
        elasticsearch-users
        x-pack-security-env
      sql/src
        main/java/org/elasticsearch/xpack/sql
        test/java/org/elasticsearch/xpack/sql/execution/search
      watcher/src/main/bin
    qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest
buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy:

@@ -24,6 +24,7 @@ import org.elasticsearch.gradle.BuildPlugin
 import org.gradle.api.Plugin
 import org.gradle.api.Project
 import org.gradle.api.plugins.JavaBasePlugin
+import org.gradle.api.tasks.compile.JavaCompile

 /**
  * Configures the build to compile against Elasticsearch's test framework and
@@ -49,5 +50,12 @@ public class StandaloneTestPlugin implements Plugin<Project> {
         test.testClassesDir project.sourceSets.test.output.classesDir
         test.mustRunAfter(project.precommit)
         project.check.dependsOn(test)
+
+        project.tasks.withType(JavaCompile) {
+            // This will be the default in Gradle 5.0
+            if (options.compilerArgs.contains("-processor") == false) {
+                options.compilerArgs << '-proc:none'
+            }
+        }
     }
 }
distribution/src/bin/elasticsearch-cli (new file, 22 lines):

@@ -0,0 +1,22 @@
#!/bin/bash

set -e -o pipefail

source "`dirname "$0"`"/elasticsearch-env

IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES"
for additional_source in "${additional_sources[@]}"
do
  source "`dirname "$0"`"/$additional_source
done

exec \
  "$JAVA" \
  $ES_JAVA_OPTS \
  -Des.path.home="$ES_HOME" \
  -Des.path.conf="$ES_PATH_CONF" \
  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
  -cp "$ES_CLASSPATH" \
  $1 \
  "${@:2}"
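The new launcher treats its first argument as the Java main class to run and forwards everything else to it, which is what lets each tool-specific script below shrink to a single elasticsearch-cli invocation. As a minimal illustration of the kind of entry point that ends up being exec'd (ExampleCli is a hypothetical name, not a class from this commit):

    // Hypothetical stand-in for a CLI main class such as KeyStoreCli:
    // the launcher receives the class name as $1 and passes "${@:2}" as args.
    public final class ExampleCli {
        public static void main(String[] args) {
            // a real tool would parse options here; this one just echoes them
            for (String arg : args) {
                System.out.println("arg: " + arg);
            }
        }
    }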
distribution/src/bin/elasticsearch-keystore:

@@ -1,14 +1,5 @@
 #!/bin/bash

-source "`dirname "$0"`"/elasticsearch-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.common.settings.KeyStoreCli \
   "$@"
distribution/src/bin/elasticsearch-plugin:

@@ -1,14 +1,5 @@
 #!/bin/bash

-source "`dirname "$0"`"/elasticsearch-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.plugins.PluginCli \
   "$@"
distribution/src/bin/elasticsearch-translog:

@@ -1,14 +1,5 @@
 #!/bin/bash

-source "`dirname "$0"`"/elasticsearch-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.index.translog.TranslogToolCli \
   "$@"
docs (Painless execute API):

@@ -1,6 +1,8 @@
 [[painless-execute-api]]
 === Painless execute API

+experimental[The painless execute api is new and the request / response format may change in a breaking way in the future]
+
 The Painless execute API allows an arbitrary script to be executed and a result to be returned.

 [[painless-execute-api-parameters]]
docs (create index):

@@ -25,7 +25,7 @@ PUT twitter
 }
 --------------------------------------------------
 // CONSOLE
-<1> Default for `number_of_shards` is 5
+<1> Default for `number_of_shards` is 1
 <2> Default for `number_of_replicas` is 1 (ie one replica for each primary shard)

 The above second curl example shows how an index called `twitter` can be
plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java:

@@ -54,7 +54,7 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin {

         @Override
         public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) {
-            if (context.equals(SearchScript.CONTEXT) == false) {
+            if (context.equals(SearchScript.SCRIPT_SCORE_CONTEXT) == false) {
                 throw new IllegalArgumentException(getType() + " scripts cannot be used for context [" + context.name + "]");
             }
             // we use the script "source" as the script identifier
qa/rolling-upgrade/build.gradle:

@@ -30,6 +30,26 @@ task bwcTest {
 }

 for (Version version : bwcVersions.wireCompatible) {
+  /*
+   * The goal here is to:
+   * <ul>
+   *  <li>start three nodes on the old version
+   *  <li>run tests with systemProperty 'tests.rest.suite', 'old_cluster'
+   *  <li>shut down one node
+   *  <li>start a node with the new version
+   *  <li>run tests with systemProperty 'tests.rest.suite', 'mixed_cluster'
+   *  <li>shut down one node on the old version
+   *  <li>start a node with the new version
+   *  <li>run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again
+   *  <li>shut down the last node with the old version
+   *  <li>start a node with the new version
+   *  <li>run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster'
+   *  <li>shut down the entire cluster
+   * </ul>
+   *
+   * Be careful: gradle dry run spits out tasks in the wrong order but,
+   * strangely, running the tasks works properly.
+   */
   String baseName = "v${version}"

   Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) {
@@ -39,8 +59,8 @@ for (Version version : bwcVersions.wireCompatible) {
   Object extension = extensions.findByName("${baseName}#oldClusterTestCluster")
   configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
     bwcVersion = version
-    numBwcNodes = 2
-    numNodes = 2
+    numBwcNodes = 3
+    numNodes = 3
     clusterName = 'rolling-upgrade'
     setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
     if (version.onOrAfter('5.3.0')) {
@@ -53,43 +73,57 @@ for (Version version : bwcVersions.wireCompatible) {
     systemProperty 'tests.rest.suite', 'old_cluster'
   }

-  Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask)
-
-  configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) {
-    dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop"
-    clusterName = 'rolling-upgrade'
-    unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
-    minimumMasterNodes = { 2 }
-    /* Override the data directory so the new node always gets the node we
-     * just stopped's data directory. */
-    dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir }
-    setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
-  }
+  Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed ->
+    configure(extensions.findByName("${baseName}#${name}")) {
+      dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop"
+      clusterName = 'rolling-upgrade'
+      unicastTransportUri = { seedNode, node, ant -> unicastSeed() }
+      minimumMasterNodes = { 3 }
+      /* Override the data directory so the new node always gets the node we
+       * just stopped's data directory. */
+      dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
+      setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
+    }
+  }

-  Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner")
-  mixedClusterTestRunner.configure {
+  Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)
+
+  configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner,
+    0, { oldClusterTest.nodes.get(1).transportUri() })
+
+  Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
+  oneThirdUpgradedTestRunner.configure {
     systemProperty 'tests.rest.suite', 'mixed_cluster'
-    finalizedBy "${baseName}#oldClusterTestCluster#node0.stop"
+    systemProperty 'tests.first_round', 'true'
+    finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
   }

+  Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)
+
+  configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner,
+    1, { oneThirdUpgradedTest.nodes.get(0).transportUri() })
+
+  Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
+  twoThirdsUpgradedTestRunner.configure {
+    systemProperty 'tests.rest.suite', 'mixed_cluster'
+    systemProperty 'tests.first_round', 'false'
+    finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
+  }
+
   Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)

-  configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) {
-    dependsOn mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop"
-    clusterName = 'rolling-upgrade'
-    unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
-    minimumMasterNodes = { 2 }
-    /* Override the data directory so the new node always gets the node we
-     * just stopped's data directory. */
-    dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir}
-    setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
-  }
+  configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner,
+    2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() })

   Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
   upgradedClusterTestRunner.configure {
     systemProperty 'tests.rest.suite', 'upgraded_cluster'
-    // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion
-    finalizedBy "${baseName}#mixedClusterTestCluster#stop"
+    /*
+     * Force stopping all the upgraded nodes after the test runner
+     * so they are alive during the test.
+     */
+    finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
+    finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
   }

   Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java (new file, 91 lines):

@@ -0,0 +1,91 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.upgrades;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.function.Predicate;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.notNullValue;

public abstract class AbstractRollingTestCase extends ESRestTestCase {
    protected enum ClusterType {
        OLD,
        MIXED,
        UPGRADED;

        public static ClusterType parse(String value) {
            switch (value) {
                case "old_cluster":
                    return OLD;
                case "mixed_cluster":
                    return MIXED;
                case "upgraded_cluster":
                    return UPGRADED;
                default:
                    throw new AssertionError("unknown cluster type: " + value);
            }
        }
    }

    protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite"));

    @Override
    protected final boolean preserveIndicesUponCompletion() {
        return true;
    }

    @Override
    protected final boolean preserveReposUponCompletion() {
        return true;
    }

    @Override
    protected final Settings restClientSettings() {
        return Settings.builder().put(super.restClientSettings())
            // increase the timeout here to 90 seconds to handle long waits for a green
            // cluster health. the waits for green need to be longer than a minute to
            // account for delayed shards
            .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
            .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
            .build();
    }
}
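Concrete rolling-upgrade suites extend this base class and key their behavior off CLUSTER_TYPE, since the Gradle build runs the same test class once per phase. A minimal sketch of that pattern, with a hypothetical class name (IndexingIT below is the commit's real example):

    // Hypothetical subclass sketch: the suite runs three times, once per
    // phase, and branches on which cluster flavor it is talking to.
    public class ExampleRollingIT extends AbstractRollingTestCase {
        public void testPhaseAware() {
            switch (CLUSTER_TYPE) {
                case OLD:      /* seed data on the un-upgraded cluster */ break;
                case MIXED:    /* assert while old and new nodes coexist */ break;
                case UPGRADED: /* final assertions on the new version */ break;
            }
        }
    }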
qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java (new file, 135 lines):

@@ -0,0 +1,135 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.upgrades;

import org.apache.http.util.EntityUtils;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Basic test that indexed documents survive the rolling restart. See
 * {@link RecoveryIT} for much more in depth testing of the mechanism
 * by which they survive.
 */
public class IndexingIT extends AbstractRollingTestCase {
    public void testIndexing() throws IOException {
        switch (CLUSTER_TYPE) {
            case OLD:
                break;
            case MIXED:
                Request waitForYellow = new Request("GET", "/_cluster/health");
                waitForYellow.addParameter("wait_for_nodes", "3");
                waitForYellow.addParameter("wait_for_status", "yellow");
                client().performRequest(waitForYellow);
                break;
            case UPGRADED:
                Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index");
                waitForGreen.addParameter("wait_for_nodes", "3");
                waitForGreen.addParameter("wait_for_status", "green");
                // wait for long enough that we give delayed unassigned shards to stop being delayed
                waitForGreen.addParameter("timeout", "70s");
                waitForGreen.addParameter("level", "shards");
                client().performRequest(waitForGreen);
                break;
            default:
                throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
        }

        if (CLUSTER_TYPE == ClusterType.OLD) {
            Request createTestIndex = new Request("PUT", "/test_index");
            createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}");
            client().performRequest(createTestIndex);

            String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}";
            Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas");
            createIndexWithReplicas.setJsonEntity(recoverQuickly);
            client().performRequest(createIndexWithReplicas);

            Request createEmptyIndex = new Request("PUT", "/empty_index");
            // Ask for recovery to be quick
            createEmptyIndex.setJsonEntity(recoverQuickly);
            client().performRequest(createEmptyIndex);

            bulk("test_index", "_OLD", 5);
            bulk("index_with_replicas", "_OLD", 5);
        }

        int expectedCount;
        switch (CLUSTER_TYPE) {
            case OLD:
                expectedCount = 5;
                break;
            case MIXED:
                if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) {
                    expectedCount = 5;
                } else {
                    expectedCount = 10;
                }
                break;
            case UPGRADED:
                expectedCount = 15;
                break;
            default:
                throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
        }

        assertCount("test_index", expectedCount);
        assertCount("index_with_replicas", 5);
        assertCount("empty_index", 0);

        if (CLUSTER_TYPE != ClusterType.OLD) {
            bulk("test_index", "_" + CLUSTER_TYPE, 5);
            Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted");
            toBeDeleted.addParameter("refresh", "true");
            toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}");
            client().performRequest(toBeDeleted);
            assertCount("test_index", expectedCount + 6);

            Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted");
            delete.addParameter("refresh", "true");
            client().performRequest(delete);

            assertCount("test_index", expectedCount + 5);
        }
    }

    private void bulk(String index, String valueSuffix, int count) throws IOException {
        StringBuilder b = new StringBuilder();
        for (int i = 0; i < count; i++) {
            b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n");
            b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n");
        }
        Request bulk = new Request("POST", "/_bulk");
        bulk.addParameter("refresh", "true");
        bulk.setJsonEntity(b.toString());
        client().performRequest(bulk);
    }

    private void assertCount(String index, int count) throws IOException {
        Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
        searchTestIndexRequest.addParameter("filter_path", "hits.total");
        Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest);
        assertEquals("{\"hits\":{\"total\":" + count + "}}",
                EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8));
    }
}
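The expected counts follow a simple invariant: the old cluster seeds test_index with 5 documents, and each later phase bulk-indexes 5 more only after its assertions, so the suite expects 5 on the old cluster, 5 then 10 across the two mixed rounds, and 15 once fully upgraded. A sketch that makes the arithmetic explicit (not code from the commit):

    // Each completed later phase (mixed round 1, mixed round 2, upgraded)
    // added 5 documents on top of the 5 seeded by the old cluster.
    static int expectedDocs(int completedLaterPhases) {
        return 5 + 5 * completedLaterPhases; // 5, 10, 15
    }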
qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java:

@@ -46,53 +46,13 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.notNullValue;

-public class RecoveryIT extends ESRestTestCase {
-
-    @Override
-    protected boolean preserveIndicesUponCompletion() {
-        return true;
-    }
-
-    @Override
-    protected boolean preserveReposUponCompletion() {
-        return true;
-    }
-
-    private enum CLUSTER_TYPE {
-        OLD,
-        MIXED,
-        UPGRADED;
-
-        public static CLUSTER_TYPE parse(String value) {
-            switch (value) {
-                case "old_cluster":
-                    return OLD;
-                case "mixed_cluster":
-                    return MIXED;
-                case "upgraded_cluster":
-                    return UPGRADED;
-                default:
-                    throw new AssertionError("unknown cluster type: " + value);
-            }
-        }
-    }
-
-    private final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite"));
-
-    @Override
-    protected Settings restClientSettings() {
-        return Settings.builder().put(super.restClientSettings())
-            // increase the timeout here to 90 seconds to handle long waits for a green
-            // cluster health. the waits for green need to be longer than a minute to
-            // account for delayed shards
-            .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
-            .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
-            .build();
-    }
-
+/**
+ * In depth testing of the recovery mechanism during a rolling restart.
+ */
+public class RecoveryIT extends AbstractRollingTestCase {
     public void testHistoryUUIDIsGenerated() throws Exception {
         final String index = "index_history_uuid";
-        if (clusterType == CLUSTER_TYPE.OLD) {
+        if (CLUSTER_TYPE == ClusterType.OLD) {
             Settings.Builder settings = Settings.builder()
                 .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                 .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
@@ -102,7 +62,7 @@ public class RecoveryIT extends ESRestTestCase {
                 // before timing out
                 .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms");
             createIndex(index, settings.build());
-        } else if (clusterType == CLUSTER_TYPE.UPGRADED) {
+        } else if (CLUSTER_TYPE == ClusterType.UPGRADED) {
             ensureGreen(index);
             Response response = client().performRequest("GET", index + "/_stats", Collections.singletonMap("level", "shards"));
             assertOK(response);
@@ -157,11 +117,11 @@ public class RecoveryIT extends ESRestTestCase {
         final Map<String, Object> nodeMap = objectPath.evaluate("nodes");
         List<String> nodes = new ArrayList<>(nodeMap.keySet());

-        switch (clusterType) {
+        switch (CLUSTER_TYPE) {
             case OLD:
                 Settings.Builder settings = Settings.builder()
                     .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                     // if the node with the replica is the first to be restarted, while a replica is still recovering
                     // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                     // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@@ -181,6 +141,7 @@ public class RecoveryIT extends ESRestTestCase {
                 assertOK(client().performRequest("POST", index + "/_refresh"));
                 assertCount(index, "_only_nodes:" + nodes.get(0), 60);
                 assertCount(index, "_only_nodes:" + nodes.get(1), 60);
+                assertCount(index, "_only_nodes:" + nodes.get(2), 60);
                 // make sure that we can index while the replicas are recovering
                 updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries"));
                 break;
@@ -191,9 +152,10 @@ public class RecoveryIT extends ESRestTestCase {
                 assertOK(client().performRequest("POST", index + "/_refresh"));
                 assertCount(index, "_only_nodes:" + nodes.get(0), 110);
                 assertCount(index, "_only_nodes:" + nodes.get(1), 110);
+                assertCount(index, "_only_nodes:" + nodes.get(2), 110);
                 break;
             default:
-                throw new IllegalStateException("unknown type " + clusterType);
+                throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
         }
     }

@@ -221,11 +183,11 @@ public class RecoveryIT extends ESRestTestCase {

     public void testRelocationWithConcurrentIndexing() throws Exception {
         final String index = "relocation_with_concurrent_indexing";
-        switch (clusterType) {
+        switch (CLUSTER_TYPE) {
             case OLD:
                 Settings.Builder settings = Settings.builder()
                     .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                     // if the node with the replica is the first to be restarted, while a replica is still recovering
                     // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                     // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@@ -258,7 +220,7 @@ public class RecoveryIT extends ESRestTestCase {
                 break;
             case UPGRADED:
                 updateIndexSettings(index, Settings.builder()
-                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+                    .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
                     .put("index.routing.allocation.include._id", (String)null)
                 );
                 asyncIndexDocs(index, 60, 50).get();
@@ -271,9 +233,10 @@ public class RecoveryIT extends ESRestTestCase {

                 assertCount(index, "_only_nodes:" + nodes.get(0), 110);
                 assertCount(index, "_only_nodes:" + nodes.get(1), 110);
+                assertCount(index, "_only_nodes:" + nodes.get(2), 110);
                 break;
             default:
-                throw new IllegalStateException("unknown type " + clusterType);
+                throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
         }
     }
qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java:

@@ -60,4 +60,3 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa
             .build();
     }
 }
-
qa/rolling-upgrade mixed_cluster test (10_basic.yml):

@@ -1,74 +1,8 @@
 ---
-"Index data and search on the mixed cluster":
- - do:
-     cluster.health:
-        wait_for_status: yellow
-        wait_for_nodes: 2
-
- - do:
-     search:
-        index: test_index
-
- - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster
-
- - do:
-     search:
-        index: index_with_replicas
-
- - match: { hits.total: 5 } # just check we recovered fine
-
- - do:
-     bulk:
-        refresh: true
-        body:
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v1_mixed", "f2": 5}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v2_mixed", "f2": 6}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v3_mixed", "f2": 7}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v4_mixed", "f2": 8}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v5_mixed", "f2": 9}'
-
- - do:
-     index:
-        index: test_index
-        type: doc
-        id: d10
-        body: {"f1": "v6_mixed", "f2": 10}
-
- - do:
-     indices.refresh:
-        index: test_index
-
- - do:
-     search:
-        index: test_index
-
- - match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster
-
- - do:
-     delete:
-        index: test_index
-        type: doc
-        id: d10
-
- - do:
-     indices.refresh:
-        index: test_index
-
- - do:
-     search:
-        index: test_index
-
- - match: { hits.total: 10 }
-
----
 "Verify that we can still find things with the template":
  - do:
      search_template:
+       index: test_search_template
       body:
         id: test_search_template
         params:
qa/rolling-upgrade old_cluster test (10_basic.yml):

@@ -1,76 +1,5 @@
 ---
-"Index data, search, and create things in the cluster state that we'll validate are there after the ugprade":
- - do:
-     indices.create:
-       index: test_index
-       body:
-         settings:
-           index:
-             number_of_replicas: 0
- - do:
-     indices.create:
-       index: index_with_replicas # dummy index to ensure we can recover indices with replicas just fine
-       body:
-         # if the node with the replica is the first to be restarted, then delayed
-         # allocation will kick in, and the cluster health won't return to GREEN
-         # before timing out
-         index.unassigned.node_left.delayed_timeout: "100ms"
-
- - do:
-     indices.create:
-       index: empty_index # index to ensure we can recover empty indices
-       body:
-         # if the node with the replica is the first to be restarted, then delayed
-         # allocation will kick in, and the cluster health won't return to GREEN
-         # before timing out
-         index.unassigned.node_left.delayed_timeout: "100ms"
-
- - do:
-     bulk:
-       refresh: true
-       body:
-         - '{"index": {"_index": "test_index", "_type": "doc"}}'
-         - '{"f1": "v1_old", "f2": 0}'
-         - '{"index": {"_index": "test_index", "_type": "doc"}}'
-         - '{"f1": "v2_old", "f2": 1}'
-         - '{"index": {"_index": "test_index", "_type": "doc"}}'
-         - '{"f1": "v3_old", "f2": 2}'
-         - '{"index": {"_index": "test_index", "_type": "doc"}}'
-         - '{"f1": "v4_old", "f2": 3}'
-         - '{"index": {"_index": "test_index", "_type": "doc"}}'
-         - '{"f1": "v5_old", "f2": 4}'
-
- - do:
-     bulk:
-       refresh: true
-       body:
-         - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
-         - '{"f1": "d_old"}'
-         - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
-         - '{"f1": "d_old"}'
-         - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
-         - '{"f1": "d_old"}'
-         - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
-         - '{"f1": "d_old"}'
-         - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
-         - '{"f1": "d_old"}'
-
- - do:
-     indices.refresh:
-       index: test_index,index_with_replicas
-
- - do:
-     search:
-       index: test_index
-
- - match: { hits.total: 5 }
-
- - do:
-     search:
-       index: index_with_replicas
-
- - match: { hits.total: 5 }
-
+"Create things in the cluster state that we'll validate are there after the ugprade":
  - do:
      snapshot.create_repository:
        repository: my_repo
@@ -91,6 +20,21 @@
             }
 - match: { "acknowledged": true }

+- do:
+    bulk:
+      refresh: true
+      body:
+        - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+        - '{"f1": "v1_old"}'
+        - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+        - '{"f1": "v2_old"}'
+        - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+        - '{"f1": "v3_old"}'
+        - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+        - '{"f1": "v4_old"}'
+        - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+        - '{"f1": "v5_old"}'
+
 - do:
     put_script:
       id: test_search_template
@@ -105,6 +49,7 @@

 - do:
     search_template:
+      index: test_search_template
      body:
        id: test_search_template
        params:
qa/rolling-upgrade upgraded_cluster test (10_basic.yml):

@@ -1,55 +1,8 @@
 ---
-"Index data and search on the upgraded cluster":
- - do:
-     cluster.health:
-        wait_for_status: green
-        wait_for_nodes: 2
-        # wait for long enough that we give delayed unassigned shards to stop being delayed
-        timeout: 70s
-        level: shards
-        index: test_index,index_with_replicas,empty_index
-
- - do:
-     search:
-        index: test_index
-
- - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters
-
- - do:
-     search:
-        index: index_with_replicas
-
- - match: { hits.total: 5 } # just check we recovered fine
-
- - do:
-     bulk:
-        refresh: true
-        body:
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v1_upgraded", "f2": 10}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v2_upgraded", "f2": 11}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v3_upgraded", "f2": 12}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v4_upgraded", "f2": 13}'
-          - '{"index": {"_index": "test_index", "_type": "doc"}}'
-          - '{"f1": "v5_upgraded", "f2": 14}'
-
- - do:
-     indices.refresh:
-        index: test_index
-
- - do:
-     search:
-        index: test_index
-
- - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs
-
----
 "Verify that we can still find things with the template":
  - do:
      search_template:
+       index: test_search_template
       body:
         id: test_search_template
         params:
server/src/main/java/org/elasticsearch/Version.java:

@@ -167,7 +167,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final int V_6_2_5_ID = 6020599;
     public static final Version V_6_2_5 = new Version(V_6_2_5_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
     public static final int V_6_3_0_ID = 6030099;
-    public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0);
+    public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
     public static final int V_6_4_0_ID = 6040099;
     public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
     public static final int V_7_0_0_alpha1_ID = 7000001;
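The numeric IDs pack major, minor, and revision into two decimal digits each, plus a trailing build number where 99 denotes a release, which is why V_6_3_0_ID is 6030099. A small sketch of the decoding, assuming that scheme:

    // 6030099 -> major 6, minor 3, revision 0, build 99 (release)
    int id = 6030099;
    int major = id / 1_000_000;        // 6
    int minor = (id / 10_000) % 100;   // 3
    int revision = (id / 100) % 100;   // 0
    int build = id % 100;              // 99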
TermsSetQueryBuilder.java:

@@ -249,7 +249,8 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue
             IndexNumericFieldData fieldData = context.getForField(msmFieldType);
             longValuesSource = new FieldValuesSource(fieldData);
         } else if (minimumShouldMatchScript != null) {
-            SearchScript.Factory factory = context.getScriptService().compile(minimumShouldMatchScript, SearchScript.CONTEXT);
+            SearchScript.Factory factory = context.getScriptService().compile(minimumShouldMatchScript,
+                SearchScript.TERMS_SET_QUERY_CONTEXT);
             Map<String, Object> params = new HashMap<>();
             params.putAll(minimumShouldMatchScript.getParams());
             params.put("num_terms", queries.size());
ScriptScoreFunctionBuilder.java:

@@ -92,7 +92,7 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder<ScriptScore
     @Override
     protected ScoreFunction doToFunction(QueryShardContext context) {
         try {
-            SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.CONTEXT);
+            SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SCORE_CONTEXT);
             SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup());
             return new ScriptScoreFunction(script, searchScript);
         } catch (Exception e) {
RestResizeHandler.java:

@@ -19,6 +19,7 @@

 package org.elasticsearch.rest.action.admin.indices;

+import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.action.support.ActiveShardCount;
@@ -47,6 +48,8 @@ public abstract class RestResizeHandler extends BaseRestHandler {
     public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
         resizeRequest.setResizeType(getResizeType());
+        // copy_settings should be removed in Elasticsearch 8.0.0; cf. https://github.com/elastic/elasticsearch/issues/28347
+        assert Version.CURRENT.major < 8;
         final String rawCopySettings = request.param("copy_settings");
         final Boolean copySettings;
         if (rawCopySettings == null) {
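The hunk cuts off inside the tri-state copy_settings handling. A hedged sketch of how such a parameter is typically resolved, reconstructed from the visible declarations rather than the committed code (the actual branches may differ):

    // Assumed completion: null means the parameter was absent, an empty
    // value means the bare flag was passed, anything else parses as boolean.
    final Boolean copySettings;
    if (rawCopySettings == null) {
        copySettings = null;                         // legacy behavior
    } else if (rawCopySettings.isEmpty()) {
        copySettings = Boolean.TRUE;                 // "?copy_settings" alone
    } else {
        copySettings = Booleans.parseBoolean(rawCopySettings);
    }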
ScriptModule.java:

@@ -42,6 +42,9 @@ public class ScriptModule {
         CORE_CONTEXTS = Stream.of(
             SearchScript.CONTEXT,
             SearchScript.AGGS_CONTEXT,
+            SearchScript.SCRIPT_SCORE_CONTEXT,
+            SearchScript.SCRIPT_SORT_CONTEXT,
+            SearchScript.TERMS_SET_QUERY_CONTEXT,
             ExecutableScript.CONTEXT,
             ExecutableScript.AGGS_CONTEXT,
             ExecutableScript.UPDATE_CONTEXT,
SearchScript.java:

@@ -158,6 +158,12 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript {

     /** The context used to compile {@link SearchScript} factories. */
     public static final ScriptContext<Factory> CONTEXT = new ScriptContext<>("search", Factory.class);
-    // TODO: remove aggs context when it has its own interface
+    // TODO: remove these contexts when it has its own interface
     public static final ScriptContext<Factory> AGGS_CONTEXT = new ScriptContext<>("aggs", Factory.class);
+    // Can return a double. (For ScriptSortType#NUMBER only, for ScriptSortType#STRING normal CONTEXT should be used)
+    public static final ScriptContext<Factory> SCRIPT_SORT_CONTEXT = new ScriptContext<>("sort", Factory.class);
+    // Can return a float
+    public static final ScriptContext<Factory> SCRIPT_SCORE_CONTEXT = new ScriptContext<>("score", Factory.class);
+    // Can return a long
+    public static final ScriptContext<Factory> TERMS_SET_QUERY_CONTEXT = new ScriptContext<>("terms_set", Factory.class);
 }
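With the new constants, each call site compiles against the context that matches what its script may return instead of the catch-all CONTEXT, as the builder hunks below show. The recurring two-line pattern, with scriptService and context standing in for whatever the call site has at hand:

    // Pick the context that matches the feature: "score" scripts may return
    // a float, "sort" scripts a double, "terms_set" scripts a long.
    SearchScript.Factory factory =
        scriptService.compile(script, SearchScript.SCRIPT_SCORE_CONTEXT);
    SearchScript.LeafFactory leafFactory =
        factory.newFactory(script.getParams(), context.lookup());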
ScriptSortBuilder.java:

@@ -305,7 +305,7 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {

     @Override
     public SortFieldAndFormat build(QueryShardContext context) throws IOException {
-        final SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.CONTEXT);
+        final SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SORT_CONTEXT);
         final SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup());

         MultiValueMode valueMode = null;
QueueResizingEsThreadPoolExecutorTests.java:

@@ -154,6 +154,7 @@ public class QueueResizingEsThreadPoolExecutorTests extends ESTestCase {
         context.close();
     }

+    @TestLogging("org.elasticsearch.common.util.concurrent:DEBUG")
     public void testAutoQueueSizingWithMax() throws Exception {
         ThreadContext context = new ThreadContext(Settings.EMPTY);
         ResizableBlockingQueue<Runnable> queue =
CorruptedFileIT.java:

@@ -470,6 +470,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
      * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several
      * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard.
      */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30577")
     @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE,org.elasticsearch.index.engine:DEBUG")
     public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
         int numDocs = scaledRandomIntBetween(100, 1000);
ExplainableScriptIT.java:

@@ -76,7 +76,7 @@ public class ExplainableScriptIT extends ESIntegTestCase {
         @Override
         public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) {
             assert scriptSource.equals("explainable_script");
-            assert context == SearchScript.CONTEXT;
+            assert context == SearchScript.SCRIPT_SCORE_CONTEXT;
             SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() {
                 @Override
                 public SearchScript newInstance(LeafReaderContext context) throws IOException {
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java:

@@ -9,6 +9,7 @@ import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.NamedDiff;
@@ -467,6 +468,14 @@ public class MlMetadata implements MetaData.Custom {
         }
     }

+    public static MlMetadata getMlMetadata(ClusterState state) {
+        MlMetadata mlMetadata = (state == null) ? null : state.getMetaData().custom(MLMetadataField.TYPE);
+        if (mlMetadata == null) {
+            return EMPTY_METADATA;
+        }
+        return mlMetadata;
+    }
+
     public static class JobAlreadyMarkedAsDeletedException extends RuntimeException {
     }
 }
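Previously every ML consumer had to null-check the custom metadata, because a freshly started cluster may not have it installed; getMlMetadata centralizes that into a null-object accessor, and the call-site hunks that follow all collapse to the same shape:

    // before: each call site defends against missing metadata
    MlMetadata maybeNull = state.metaData().custom(MLMetadataField.TYPE);
    MlMetadata mlMetadata = (maybeNull == null) ? MlMetadata.EMPTY_METADATA : maybeNull;

    // after: the accessor guarantees a non-null (possibly empty) instance
    MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);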
AnomalyDetectorsIndex.java:

@@ -6,7 +6,6 @@
 package org.elasticsearch.xpack.core.ml.job.persistence;

 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;

 /**
@@ -47,8 +46,7 @@ public final class AnomalyDetectorsIndex {
      * @return The index name
      */
     public static String getPhysicalIndexFromState(ClusterState state, String jobId) {
-        MlMetadata meta = state.getMetaData().custom(MLMetadataField.TYPE);
-        return meta.getJobs().get(jobId).getResultsIndexName();
+        return MlMetadata.getMlMetadata(state).getJobs().get(jobId).getResultsIndexName();
     }

     /**
MachineLearningFeatureSet.java:

@@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.XPackFeatureSet;
 import org.elasticsearch.xpack.core.XPackPlugin;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;
@@ -132,15 +131,7 @@ public class MachineLearningFeatureSet implements XPackFeatureSet {
     @Override
     public void usage(ActionListener<XPackFeatureSet.Usage> listener) {
         ClusterState state = clusterService.state();
-        MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE);
-
-        // Handle case when usage is called but MlMetadata has not been installed yet
-        if (mlMetadata == null) {
-            listener.onResponse(new MachineLearningFeatureSetUsage(available(), enabled,
-                    Collections.emptyMap(), Collections.emptyMap()));
-        } else {
-            new Retriever(client, mlMetadata, available(), enabled()).execute(listener);
-        }
+        new Retriever(client, MlMetadata.getMlMetadata(state), available(), enabled()).execute(listener);
     }

     public static class Retriever {
MlAssignmentNotifier.java:

@@ -13,7 +13,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
 import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
@@ -90,8 +89,7 @@ public class MlAssignmentNotifier extends AbstractComponent implements ClusterSt
             }
         } else if (StartDatafeedAction.TASK_NAME.equals(currentTask.getTaskName())) {
             String datafeedId = ((StartDatafeedAction.DatafeedParams) currentTask.getParams()).getDatafeedId();
-            MlMetadata mlMetadata = event.state().getMetaData().custom(MLMetadataField.TYPE);
-            DatafeedConfig datafeedConfig = mlMetadata.getDatafeed(datafeedId);
+            DatafeedConfig datafeedConfig = MlMetadata.getMlMetadata(event.state()).getDatafeed(datafeedId);
             if (currentAssignment.getExecutorNode() == null) {
                 String msg = "No node found to start datafeed [" + datafeedId +"]. Reasons [" +
                         currentAssignment.getExplanation() + "]";
MlInitializationService.java:

@@ -7,20 +7,13 @@ package org.elasticsearch.xpack.ml;

 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
-import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.component.LifecycleListener;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
-
-import java.util.concurrent.atomic.AtomicBoolean;

 class MlInitializationService extends AbstractComponent implements ClusterStateListener {

@@ -28,8 +21,6 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL
     private final ClusterService clusterService;
     private final Client client;

-    private final AtomicBoolean installMlMetadataCheck = new AtomicBoolean(false);
-
     private volatile MlDailyMaintenanceService mlDailyMaintenanceService;

     MlInitializationService(Settings settings, ThreadPool threadPool, ClusterService clusterService, Client client) {
@@ -48,45 +39,12 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL
         }

         if (event.localNodeMaster()) {
-            MetaData metaData = event.state().metaData();
-            installMlMetadata(metaData);
             installDailyMaintenanceService();
         } else {
             uninstallDailyMaintenanceService();
         }
     }

-    private void installMlMetadata(MetaData metaData) {
-        if (metaData.custom(MLMetadataField.TYPE) == null) {
-            if (installMlMetadataCheck.compareAndSet(false, true)) {
-                threadPool.executor(ThreadPool.Names.GENERIC).execute(() ->
-                        clusterService.submitStateUpdateTask("install-ml-metadata", new ClusterStateUpdateTask() {
-                            @Override
-                            public ClusterState execute(ClusterState currentState) throws Exception {
-                                // If the metadata has been added already don't try to update
-                                if (currentState.metaData().custom(MLMetadataField.TYPE) != null) {
-                                    return currentState;
-                                }
-                                ClusterState.Builder builder = new ClusterState.Builder(currentState);
-                                MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData());
-                                metadataBuilder.putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA);
-                                builder.metaData(metadataBuilder.build());
-                                return builder.build();
-                            }
-
-                            @Override
-                            public void onFailure(String source, Exception e) {
-                                installMlMetadataCheck.set(false);
-                                logger.error("unable to install ml metadata", e);
-                            }
-                        })
-                );
-            }
-        } else {
-            installMlMetadataCheck.set(false);
-        }
-    }
-
     private void installDailyMaintenanceService() {
         if (mlDailyMaintenanceService == null) {
             mlDailyMaintenanceService = new MlDailyMaintenanceService(clusterService.getClusterName(), threadPool, client);
TransportCloseJobAction.java:

@@ -27,7 +27,6 @@ import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
 import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction;
@@ -92,8 +91,7 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
     static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState state, List<String> openJobIds,
                                         List<String> closingJobIds) {
         PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
-        MlMetadata maybeNull = state.metaData().custom(MLMetadataField.TYPE);
-        final MlMetadata mlMetadata = (maybeNull == null) ? MlMetadata.EMPTY_METADATA : maybeNull;
+        final MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);

         List<String> failedJobs = new ArrayList<>();

@@ -107,7 +105,7 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
         };

         Set<String> expandedJobIds = mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs());
-        expandedJobIds.stream().forEach(jobIdProcessor::accept);
+        expandedJobIds.forEach(jobIdProcessor::accept);
         if (request.isForce() == false && failedJobs.size() > 0) {
             if (expandedJobIds.size() == 1) {
                 throw ExceptionsHelper.conflictStatusException("cannot close job [{}] because it failed, use force close",
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java:

@@ -119,8 +119,8 @@ public class TransportDeleteDatafeedAction extends TransportMasterNodeAction<Del
             }

             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
+                MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState);
                 PersistentTasksCustomMetaData persistentTasks =
                         currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
                 MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
TransportDeleteFilterAction.java:

@@ -24,7 +24,6 @@ import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetaIndex;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.job.config.Detector;
@@ -60,8 +59,7 @@ public class TransportDeleteFilterAction extends HandledTransportAction<DeleteFi

         final String filterId = request.getFilterId();
         ClusterState state = clusterService.state();
-        MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        Map<String, Job> jobs = currentMlMetadata.getJobs();
+        Map<String, Job> jobs = MlMetadata.getMlMetadata(state).getJobs();
         List<String> currentlyUsedBy = new ArrayList<>();
         for (Job job : jobs.values()) {
             List<Detector> detectors = job.getAnalysisConfig().getDetectors();
TransportDeleteJobAction.java:

@@ -200,10 +200,9 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction<DeleteJo
     void markJobAsDeleting(String jobId, ActionListener<Boolean> listener, boolean force) {
         clusterService.submitStateUpdateTask("mark-job-as-deleted", new ClusterStateUpdateTask() {
             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
                 PersistentTasksCustomMetaData tasks = currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
-                MlMetadata.Builder builder = new MlMetadata.Builder(currentMlMetadata);
+                MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState));
                 builder.markJobAsDeleted(jobId, tasks, force);
                 return buildNewClusterState(currentState, builder);
             }
@@ -248,11 +247,7 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction<DeleteJo
     }

     static boolean jobIsDeletedFromState(String jobId, ClusterState clusterState) {
-        MlMetadata metadata = clusterState.metaData().custom(MLMetadataField.TYPE);
-        if (metadata == null) {
-            return true;
-        }
-        return !metadata.getJobs().containsKey(jobId);
+        return !MlMetadata.getMlMetadata(clusterState).getJobs().containsKey(jobId);
     }

     private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) {
TransportFinalizeJobExecutionAction.java:

@@ -56,8 +56,8 @@ public class TransportFinalizeJobExecutionAction extends TransportMasterNodeActi
         logger.debug("finalizing jobs [{}]", jobIdString);
         clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+            public ClusterState execute(ClusterState currentState) {
+                MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState);
                 MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata);
                 Date finishedTime = new Date();
@@ -15,7 +15,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction;
 import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction;
@@ -70,7 +69,7 @@ public class TransportGetCalendarEventsAction extends HandledTransportAction<Get

         if (request.getJobId() != null) {
             ClusterState state = clusterService.state();
-            MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE);
+            MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state);

             List<String> jobGroups;
             String requestId = request.getJobId();
@@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
@@ -52,10 +51,7 @@ public class TransportGetDatafeedsAction extends TransportMasterNodeReadAction<G
                                    ActionListener<GetDatafeedsAction.Response> listener) throws Exception {
         logger.debug("Get datafeed '{}'", request.getDatafeedId());

-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
         Set<String> expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds());
         List<DatafeedConfig> datafeedConfigs = new ArrayList<>();
         for (String expandedDatafeedId : expandedDatafeedIds) {
@@ -18,7 +18,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
@@ -56,11 +55,7 @@ public class TransportGetDatafeedsStatsAction extends TransportMasterNodeReadAct
                                    ActionListener<GetDatafeedsStatsAction.Response> listener) throws Exception {
         logger.debug("Get stats for datafeed '{}'", request.getDatafeedId());

-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
-
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
         Set<String> expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds());

         PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
@@ -23,7 +23,6 @@ import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
@@ -69,8 +68,7 @@ public class TransportGetJobsStatsAction extends TransportTasksAction<TransportO

     @Override
     protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener<GetJobsStatsAction.Response> listener) {
-        MlMetadata clusterMlMetadata = clusterService.state().metaData().custom(MLMetadataField.TYPE);
-        MlMetadata mlMetadata = (clusterMlMetadata == null) ? MlMetadata.EMPTY_METADATA : clusterMlMetadata;
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state());
         request.setExpandedJobsIds(new ArrayList<>(mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs())));
         ActionListener<GetJobsStatsAction.Response> finalListener = listener;
         listener = ActionListener.wrap(response -> gatherStatsForClosedJobs(mlMetadata,
@@ -49,7 +49,6 @@ import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetaIndex;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
@@ -163,7 +162,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
                 continue;
             }

-            MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
+            MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
             Job job = mlMetadata.getJobs().get(jobId);
             Set<String> compatibleJobTypes = Job.getCompatibleJobTypes(node.getVersion());
             if (compatibleJobTypes.contains(job.getJobType()) == false) {
@@ -474,8 +473,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
         // Step 3. Update established model memory for pre-6.1 jobs that haven't had it set
         ActionListener<Boolean> missingMappingsListener = ActionListener.wrap(
                 response -> {
-                    MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE);
-                    Job job = mlMetadata.getJobs().get(jobParams.getJobId());
+                    Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobParams.getJobId());
                     if (job != null) {
                         Version jobVersion = job.getJobVersion();
                         Long jobEstablishedModelMemory = job.getEstablishedModelMemory();
@@ -650,8 +648,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
         public void validate(OpenJobAction.JobParams params, ClusterState clusterState) {
             // If we already know that we can't find an ml node because all ml nodes are running at capacity or
             // simply because there are no ml nodes in the cluster then we fail quickly here:
-            MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE);
-            TransportOpenJobAction.validate(params.getJobId(), mlMetadata);
+            TransportOpenJobAction.validate(params.getJobId(), MlMetadata.getMlMetadata(clusterState));
             PersistentTasksCustomMetaData.Assignment assignment = selectLeastLoadedMlNode(params.getJobId(), clusterState,
                     maxConcurrentJobAllocations, fallbackMaxNumberOfOpenJobs, maxMachineMemoryPercent, logger);
             if (assignment.getExecutorNode() == null) {
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
@@ -17,7 +17,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ClientHelper;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction;
 import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig;
@@ -52,7 +51,7 @@ public class TransportPreviewDatafeedAction extends HandledTransportAction<Previ

     @Override
     protected void doExecute(PreviewDatafeedAction.Request request, ActionListener<PreviewDatafeedAction.Response> listener) {
-        MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state());
         DatafeedConfig datafeed = mlMetadata.getDatafeed(request.getDatafeedId());
         if (datafeed == null) {
             throw ExceptionsHelper.missingDatafeedException(request.getDatafeedId());
@@ -14,9 +14,7 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -26,16 +24,12 @@ import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.ml.action.PutCalendarAction;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetaIndex;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.calendars.Calendar;
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;

 import java.io.IOException;
 import java.util.Collections;
-import java.util.List;
-import java.util.function.Consumer;

 import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
@@ -43,17 +37,15 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
 public class TransportPutCalendarAction extends HandledTransportAction<PutCalendarAction.Request, PutCalendarAction.Response> {

     private final Client client;
-    private final ClusterService clusterService;

     @Inject
     public TransportPutCalendarAction(Settings settings, ThreadPool threadPool,
                                       TransportService transportService, ActionFilters actionFilters,
                                       IndexNameExpressionResolver indexNameExpressionResolver,
-                                      Client client, ClusterService clusterService) {
+                                      Client client) {
         super(settings, PutCalendarAction.NAME, threadPool, transportService, actionFilters,
                 indexNameExpressionResolver, PutCalendarAction.Request::new);
         this.client = client;
-        this.clusterService = clusterService;
     }

     @Override
@@ -141,7 +141,7 @@ public class TransportPutDatafeedAction extends TransportMasterNodeAction<PutDat
     }

     private ClusterState putDatafeed(PutDatafeedAction.Request request, ClusterState clusterState) {
-        MlMetadata currentMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
+        MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState);
         MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
                 .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build();
         return ClusterState.builder(clusterState).metaData(
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java
@@ -130,7 +130,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star
         };

         // Verify data extractor factory can be created, then start persistent task
-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
         PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
         validate(params.getDatafeedId(), mlMetadata, tasks);
         DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId());
@@ -221,9 +221,8 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star

         @Override
         public void validate(StartDatafeedAction.DatafeedParams params, ClusterState clusterState) {
-            MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE);
             PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
-            TransportStartDatafeedAction.validate(params.getDatafeedId(), mlMetadata, tasks);
+            TransportStartDatafeedAction.validate(params.getDatafeedId(), MlMetadata.getMlMetadata(clusterState), tasks);
             new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId()).checkDatafeedTaskCanBeCreated();
         }

@@ -130,7 +130,7 @@ public class TransportStopDatafeedAction extends TransportTasksAction<TransportS
                     new ActionListenerResponseHandler<>(listener, StopDatafeedAction.Response::new));
             }
         } else {
-            MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE);
+            MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
             PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);

             List<String> startedDatafeeds = new ArrayList<>();
@@ -16,7 +16,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.PutCalendarAction;
 import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction;
@@ -48,8 +47,7 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction<Upd
     @Override
     protected void doExecute(UpdateCalendarJobAction.Request request, ActionListener<PutCalendarAction.Response> listener) {
         ClusterState clusterState = clusterService.state();
-        MlMetadata maybeNullMetaData = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        final MlMetadata mlMetadata = maybeNullMetaData == null ? MlMetadata.EMPTY_METADATA : maybeNullMetaData;
+        final MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);

         Set<String> jobIdsToAdd = Strings.tokenizeByCommaToSet(request.getJobIdsToAddExpression());
         Set<String> jobIdsToRemove = Strings.tokenizeByCommaToSet(request.getJobIdsToRemoveExpression());
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java
@@ -63,9 +63,9 @@ public class TransportUpdateDatafeedAction extends TransportMasterNodeAction<Upd
             }

             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
+            public ClusterState execute(ClusterState currentState) {
                 DatafeedUpdate update = request.getUpdate();
-                MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE);
+                MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState);
                 PersistentTasksCustomMetaData persistentTasks =
                         currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
                 MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
@@ -20,7 +20,6 @@ import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
 import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
@@ -80,10 +79,7 @@ public class DatafeedManager extends AbstractComponent {
     public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer<Exception> taskHandler) {
         String datafeedId = task.getDatafeedId();
         ClusterState state = clusterService.state();
-        MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);

         DatafeedConfig datafeed = mlMetadata.getDatafeed(datafeedId);
         Job job = mlMetadata.getJobs().get(datafeed.getJobId());
@@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
 import org.elasticsearch.xpack.core.ml.job.config.JobState;
@@ -33,7 +32,7 @@ public class DatafeedNodeSelector {
     private final IndexNameExpressionResolver resolver;

     public DatafeedNodeSelector(ClusterState clusterState, IndexNameExpressionResolver resolver, String datafeedId) {
-        MlMetadata mlMetadata = Objects.requireNonNull(clusterState.metaData().custom(MLMetadataField.TYPE));
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
         PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
         this.datafeed = mlMetadata.getDatafeed(datafeedId);
         this.jobTask = MlMetadata.getJobTask(datafeed.getJobId(), tasks);
@@ -133,8 +133,7 @@ public class JobManager extends AbstractComponent {
      * @throws ResourceNotFoundException if no job matches {@code jobId}
      */
     public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState) {
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        Job job = (mlMetadata == null) ? null : mlMetadata.getJobs().get(jobId);
+        Job job = MlMetadata.getMlMetadata(clusterState).getJobs().get(jobId);
         if (job == null) {
             throw ExceptionsHelper.missingJobException(jobId);
         }
@@ -142,11 +141,7 @@ public class JobManager extends AbstractComponent {
     }

     private Set<String> expandJobIds(String expression, boolean allowNoJobs, ClusterState clusterState) {
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata == null) {
-            mlMetadata = MlMetadata.EMPTY_METADATA;
-        }
-        return mlMetadata.expandJobIds(expression, allowNoJobs);
+        return MlMetadata.getMlMetadata(clusterState).expandJobIds(expression, allowNoJobs);
     }

     /**
@@ -160,7 +155,7 @@ public class JobManager extends AbstractComponent {
      */
     public QueryPage<Job> expandJobs(String expression, boolean allowNoJobs, ClusterState clusterState) {
         Set<String> expandedJobIds = expandJobIds(expression, allowNoJobs, clusterState);
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState);
         List<Job> jobs = new ArrayList<>();
         for (String expandedJobId : expandedJobIds) {
             jobs.add(mlMetadata.getJobs().get(expandedJobId));
@@ -188,8 +183,8 @@ public class JobManager extends AbstractComponent {
             DEPRECATION_LOGGER.deprecated("Creating jobs with delimited data format is deprecated. Please use xcontent instead.");
         }

-        MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE);
-        if (currentMlMetadata != null && currentMlMetadata.getJobs().containsKey(job.getId())) {
+        MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state);
+        if (currentMlMetadata.getJobs().containsKey(job.getId())) {
             actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId()));
             return;
         }
@@ -469,8 +464,8 @@ public class JobManager extends AbstractComponent {
         }

         @Override
-        public ClusterState execute(ClusterState currentState) throws Exception {
-            MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+        public ClusterState execute(ClusterState currentState) {
+            MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState);
             if (currentMlMetadata.getJobs().containsKey(jobId) == false) {
                 // We wouldn't have got here if the job never existed so
                 // the Job must have been deleted by another action.
@@ -560,8 +555,7 @@ public class JobManager extends AbstractComponent {
     }

     private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentState) {
-        MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
-        return new MlMetadata.Builder(currentMlMetadata);
+        return new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState));
     }

     private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) {
@@ -11,7 +11,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
 import org.elasticsearch.xpack.core.ml.job.results.Result;
@@ -61,12 +60,8 @@ abstract class AbstractExpiredJobDataRemover implements MlDataRemover {
     }

     private Iterator<Job> newJobIterator() {
-        List<Job> jobs = new ArrayList<>();
         ClusterState clusterState = clusterService.state();
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata != null) {
-            jobs.addAll(mlMetadata.getJobs().values());
-        }
+        List<Job> jobs = new ArrayList<>(MlMetadata.getMlMetadata(clusterState).getJobs().values());
         return createVolatileCursorIterator(jobs);
     }

@@ -11,10 +11,8 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
 import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings;
@@ -24,7 +22,6 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles;
 import org.elasticsearch.xpack.ml.job.persistence.BatchedStateDocIdsIterator;

 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Deque;
 import java.util.List;
 import java.util.Objects;
@@ -84,12 +81,7 @@ public class UnusedStateRemover implements MlDataRemover {
     }

     private Set<String> getJobIds() {
-        ClusterState clusterState = clusterService.state();
-        MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
-        if (mlMetadata != null) {
-            return mlMetadata.getJobs().keySet();
-        }
-        return Collections.emptySet();
+        return MlMetadata.getMlMetadata(clusterService.state()).getJobs().keySet();
     }

     private void executeDeleteUnusedStateDocs(BulkRequestBuilder deleteUnusedStateRequestBuilder, ActionListener<Boolean> listener) {
@@ -65,7 +65,7 @@ public class MachineLearningFeatureSetTests extends ESTestCase {
     private XPackLicenseState licenseState;

     @Before
-    public void init() throws Exception {
+    public void init() {
         commonSettings = Settings.builder()
                 .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
                 .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false)
@@ -232,9 +232,28 @@ public class MachineLearningFeatureSetTests extends ESTestCase {
         try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
             usage.toXContent(builder, ToXContent.EMPTY_PARAMS);
             source = new XContentSource(builder);
-            assertThat(source.getValue("jobs"), equalTo(Collections.emptyMap()));
-            assertThat(source.getValue("datafeeds"), equalTo(Collections.emptyMap()));
         }
+
+        assertThat(source.getValue("jobs._all.count"), equalTo(0));
+        assertThat(source.getValue("jobs._all.detectors.min"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.detectors.max"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.detectors.total"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.detectors.avg"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.min"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.max"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.total"), equalTo(0.0));
+        assertThat(source.getValue("jobs._all.model_size.avg"), equalTo(0.0));
+
+        assertThat(source.getValue("jobs.opening"), is(nullValue()));
+        assertThat(source.getValue("jobs.opened"), is(nullValue()));
+        assertThat(source.getValue("jobs.closing"), is(nullValue()));
+        assertThat(source.getValue("jobs.closed"), is(nullValue()));
+        assertThat(source.getValue("jobs.failed"), is(nullValue()));
+
+        assertThat(source.getValue("datafeeds._all.count"), equalTo(0));
+
+        assertThat(source.getValue("datafeeds.started"), is(nullValue()));
+        assertThat(source.getValue("datafeeds.stopped"), is(nullValue()));
     }

     private void givenJobs(List<Job> jobs, List<GetJobsStatsAction.Response.JobStats> jobsStats) {
@@ -10,7 +10,6 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -22,20 +21,15 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.junit.Before;
-import org.mockito.Mockito;

 import java.net.InetAddress;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;

 import static org.elasticsearch.mock.orig.Mockito.doAnswer;
-import static org.elasticsearch.mock.orig.Mockito.times;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -68,7 +62,7 @@ public class MlInitializationServiceTests extends ESTestCase {
         when(clusterService.getClusterName()).thenReturn(CLUSTER_NAME);
     }

-    public void testInitialize() throws Exception {
+    public void testInitialize() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);

         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
@@ -80,11 +74,10 @@ public class MlInitializationServiceTests extends ESTestCase {
                 .build();
         initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));

-        verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any());
         assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true));
     }

-    public void testInitialize_noMasterNode() throws Exception {
+    public void testInitialize_noMasterNode() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);

         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
@@ -94,11 +87,10 @@ public class MlInitializationServiceTests extends ESTestCase {
                 .build();
         initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));

-        verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any());
         assertThat(initializationService.getDailyMaintenanceService(), is(nullValue()));
     }

-    public void testInitialize_alreadyInitialized() throws Exception {
+    public void testInitialize_alreadyInitialized() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);

         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
@@ -113,67 +105,10 @@ public class MlInitializationServiceTests extends ESTestCase {
         initializationService.setDailyMaintenanceService(initialDailyMaintenanceService);
         initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));

-        verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any());
         assertSame(initialDailyMaintenanceService, initializationService.getDailyMaintenanceService());
     }

-    public void testInitialize_onlyOnce() throws Exception {
-        MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
-
-        ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .nodes(DiscoveryNodes.builder()
-                        .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))
-                        .localNodeId("_node_id")
-                        .masterNodeId("_node_id"))
-                .metaData(MetaData.builder())
-                .build();
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-
-        verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any());
-    }
-
-    public void testInitialize_reintialiseAfterFailure() throws Exception {
-        MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
-
-        // Fail the first cluster state update
-        AtomicBoolean onFailureCalled = new AtomicBoolean(false);
-        Mockito.doAnswer(invocation -> {
-            ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1];
-            task.onFailure("mock a failure", new IllegalStateException());
-            onFailureCalled.set(true);
-            return null;
-        }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .nodes(DiscoveryNodes.builder()
-                        .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))
-                        .localNodeId("_node_id")
-                        .masterNodeId("_node_id"))
-                .metaData(MetaData.builder())
-                .build();
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-        assertTrue("Something went wrong mocking the cluster update task", onFailureCalled.get());
-        verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        // 2nd update succeeds
-        AtomicReference<ClusterState> clusterStateHolder = new AtomicReference<>();
-        Mockito.doAnswer(invocation -> {
-            ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1];
-            clusterStateHolder.set(task.execute(cs));
-            return null;
-        }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
-        assertTrue("Something went wrong mocking the sucessful cluster update task", clusterStateHolder.get() != null);
-        verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-
-        // 3rd update won't be called as ML Metadata has been installed
-        initializationService.clusterChanged(new ClusterChangedEvent("_source", clusterStateHolder.get(), clusterStateHolder.get()));
-        verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class));
-    }
-
-    public void testNodeGoesFromMasterToNonMasterAndBack() throws Exception {
+    public void testNodeGoesFromMasterToNonMasterAndBack() {
         MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client);
         MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class);
         initializationService.setDailyMaintenanceService(initialDailyMaintenanceService);
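The deleted tests covered the "install-ml-metadata" cluster-state update task, which this commit removes from MlInitializationService: empty ML metadata is no longer installed on startup, so the install-once and retry-after-failure behaviours no longer exist to test. What survives, per the remaining tests, is the daily-maintenance lifecycle tied to mastership. A rough sketch of that remaining responsibility, inferred from the tests above (the helper names are our own; the real service has more state and synchronization):

    import org.elasticsearch.cluster.ClusterChangedEvent;
    import org.elasticsearch.cluster.ClusterStateListener;

    // Sketch only; not the verbatim MlInitializationService.
    class MlInitializationSketch implements ClusterStateListener {
        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.localNodeMaster()) {
                installDailyMaintenanceService();   // create/start once while this node is master
            } else {
                uninstallDailyMaintenanceService(); // stop when mastership is lost
            }
        }

        void installDailyMaintenanceService() { /* hypothetical helper */ }
        void uninstallDailyMaintenanceService() { /* hypothetical helper */ }
    }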
@@ -251,11 +251,11 @@ public class DatafeedManagerTests extends ESTestCase {
     }

     public void testDatafeedTaskWaitsUntilJobIsOpened() {
         PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         ClusterState.Builder cs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
         when(clusterService.state()).thenReturn(cs.build());

         Consumer<Exception> handler = mockConsumer();
@@ -269,8 +269,8 @@ public class DatafeedManagerTests extends ESTestCase {
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder);
         ClusterState.Builder anotherJobCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));

         capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs.build(), cs.build()));

@@ -280,8 +280,8 @@ public class DatafeedManagerTests extends ESTestCase {
         tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder);
         ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));

         capturedClusterStateListener.getValue().clusterChanged(
                 new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs.build()));
@@ -294,8 +294,8 @@ public class DatafeedManagerTests extends ESTestCase {
         PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         ClusterState.Builder cs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
         when(clusterService.state()).thenReturn(cs.build());

         Consumer<Exception> handler = mockConsumer();
@@ -308,8 +308,8 @@ public class DatafeedManagerTests extends ESTestCase {
         tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.FAILED, tasksBuilder);
         ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));

         capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", updatedCs.build(), cs.build()));

@@ -322,8 +322,8 @@ public class DatafeedManagerTests extends ESTestCase {
         PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
         ClusterState.Builder cs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
         when(clusterService.state()).thenReturn(cs.build());

         Consumer<Exception> handler = mockConsumer();
@@ -340,8 +340,8 @@ public class DatafeedManagerTests extends ESTestCase {
         tasksBuilder = PersistentTasksCustomMetaData.builder();
         addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder);
         ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state())
-                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData()
-                        .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));
+                .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state()))
+                        .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build()));

         capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs.build(), updatedCs.build()));

@@ -103,7 +103,7 @@ public class DeleteJobIT extends BaseMlIntegTestCase {
     }

     private ClusterState markJobAsDeleted(String jobId, ClusterState currentState) {
-        MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState);
         assertNotNull(mlMetadata);

         MlMetadata.Builder builder = new MlMetadata.Builder(mlMetadata);
@@ -116,7 +116,7 @@ public class DeleteJobIT extends BaseMlIntegTestCase {
     }

     private ClusterState removeJobFromClusterState(String jobId, ClusterState currentState) {
-        MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metaData().custom(MLMetadataField.TYPE));
+        MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState));
         builder.deleteJob(jobId, currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE));

         ClusterState.Builder newState = ClusterState.builder(currentState);
@@ -326,7 +326,7 @@ public class JobManagerTests extends ESTestCase {

     private ClusterState createClusterState() {
         ClusterState.Builder builder = ClusterState.builder(new ClusterName("_name"));
-        builder.metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA));
+        builder.metaData(MetaData.builder());
         return builder.build();
     }
 }
@@ -39,8 +39,6 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
-import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
 import org.elasticsearch.xpack.core.ml.job.config.Job;
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
@@ -93,7 +91,7 @@ public class JobProviderTests extends ESTestCase {
         AtomicReference<Boolean> resultHolder = new AtomicReference<>();

         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(ImmutableOpenMap.of()))
+                .metaData(MetaData.builder().indices(ImmutableOpenMap.of()))
                 .build();

         ClusterService clusterService = mock(ClusterService.class);
@@ -157,7 +155,7 @@ public class JobProviderTests extends ESTestCase {
                 .fPut(AnomalyDetectorsIndex.jobResultsAliasedName("foo"), indexMetaData).build();

         ClusterState cs2 = ClusterState.builder(new ClusterName("_name"))
-                .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build();
+                .metaData(MetaData.builder().indices(indexMap)).build();

         ClusterService clusterService = mock(ClusterService.class);

@@ -209,7 +207,7 @@ public class JobProviderTests extends ESTestCase {
         ImmutableOpenMap<String, IndexMetaData> indexMap = ImmutableOpenMap.<String, IndexMetaData>builder().build();

         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-                .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build();
+                .metaData(MetaData.builder().indices(indexMap)).build();

         ClusterService clusterService = mock(ClusterService.class);

@@ -28,7 +28,6 @@ import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.ml.LocalStateMachineLearning;
-import org.elasticsearch.xpack.core.ml.MLMetadataField;
 import org.elasticsearch.xpack.ml.MachineLearning;
 import org.elasticsearch.xpack.core.ml.MachineLearningField;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
@@ -272,8 +271,7 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase {
     }

     public static void deleteAllDatafeeds(Logger logger, Client client) throws Exception {
-        MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData();
-        MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState());
         try {
             logger.info("Closing all datafeeds (using _all)");
             StopDatafeedAction.Response stopResponse = client
@@ -312,8 +310,7 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase {
     }

     public static void deleteAllJobs(Logger logger, Client client) throws Exception {
-        MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData();
-        MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE);
+        MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState());

         try {
             CloseJobAction.Request closeRequest = new CloseJobAction.Request(MetaData.ALL);
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \
   "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.core.ssl.CertificateTool \
   "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \
-  "$@"
+  "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \
   "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \
   "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \
   "$@"
@@ -4,17 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-security-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \
+  "`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.security.authc.file.tool.UsersTool \
   "$@"
@@ -4,7 +4,5 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/x-pack-env
-
 # include x-pack-security jars in classpath
 ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-security/*"
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.xpack.sql.execution.search;

-import org.elasticsearch.index.query.BoolQueryBuilder;
-import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
@@ -28,6 +26,7 @@ import org.elasticsearch.xpack.sql.querydsl.container.Sort;
 import java.util.List;

 import static java.util.Collections.singletonList;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
 import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
 import static org.elasticsearch.search.sort.SortBuilders.scoreSort;
 import static org.elasticsearch.search.sort.SortBuilders.scriptSort;
@@ -37,20 +36,23 @@ public abstract class SourceGenerator {
     private static final List<String> NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_);

     public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) {
-        final SearchSourceBuilder source = new SearchSourceBuilder();
+        QueryBuilder finalQuery = null;
         // add the source
-        if (container.query() == null) {
+        if (container.query() != null) {
             if (filter != null) {
-                source.query(new ConstantScoreQueryBuilder(filter));
+                finalQuery = boolQuery().must(container.query().asBuilder()).filter(filter);
+            } else {
+                finalQuery = container.query().asBuilder();
             }
         } else {
             if (filter != null) {
-                source.query(new BoolQueryBuilder().must(container.query().asBuilder()).filter(filter));
-            } else {
-                source.query(container.query().asBuilder());
+                finalQuery = boolQuery().filter(filter);
             }
         }

+        final SearchSourceBuilder source = new SearchSourceBuilder();
+        source.query(finalQuery);
+
         SqlSourceBuilder sortBuilder = new SqlSourceBuilder();
         // Iterate through all the columns requested, collecting the fields that
         // need to be retrieved from the result documents
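The restructure funnels all three query/filter combinations through a single finalQuery, and swaps the old constant_score wrapper for an equivalent un-scored bool filter. Condensed as a standalone helper (our own restatement of the control flow above, not code from the commit):

    import org.elasticsearch.index.query.QueryBuilder;

    import static org.elasticsearch.index.query.QueryBuilders.boolQuery;

    class FinalQuerySketch {
        // Mirrors the new sourceBuilder logic: a scoring query keeps its score, a bare
        // filter stays un-scored (as constant_score was), and no query means match_all.
        static QueryBuilder finalQuery(QueryBuilder query, QueryBuilder filter) {
            if (query != null) {
                return filter == null ? query : boolQuery().must(query).filter(filter);
            }
            return filter == null ? null : boolQuery().filter(filter);
        }
    }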
@@ -5,13 +5,13 @@
  */
 package org.elasticsearch.xpack.sql.querydsl.query;

-import java.util.Objects;
-
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.sort.NestedSortBuilder;
 import org.elasticsearch.xpack.sql.tree.Location;

+import java.util.Objects;
+
 import static org.elasticsearch.index.query.QueryBuilders.boolQuery;

 /**
@@ -63,9 +63,8 @@ public class BoolQuery extends Query {
     public QueryBuilder asBuilder() {
         BoolQueryBuilder boolQuery = boolQuery();
         if (isAnd) {
-            // TODO are we throwing out score by using filter?
-            boolQuery.filter(left.asBuilder());
-            boolQuery.filter(right.asBuilder());
+            boolQuery.must(left.asBuilder());
+            boolQuery.must(right.asBuilder());
         } else {
             boolQuery.should(left.asBuilder());
             boolQuery.should(right.asBuilder());
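This filter-to-must switch is the heart of "Preserve scoring in bool queries": in an Elasticsearch bool query, must clauses contribute to _score, while filter clauses only select documents and score every hit identically. A small illustration using the standard query builders (our example, not commit code):

    import org.elasticsearch.index.query.BoolQueryBuilder;

    import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
    import static org.elasticsearch.index.query.QueryBuilders.matchQuery;

    class BoolScoringExample {
        // Roughly what an SQL predicate combining two MATCH clauses with AND now builds:
        // both clauses feed into the document's relevance score.
        static BoolQueryBuilder scoredAnd() {
            return boolQuery()
                    .must(matchQuery("foo", "bar"))
                    .must(matchQuery("bar", "baz"));
        }

        // The pre-change shape: same matching documents, but neither clause
        // contributes to _score, so every hit scored the same.
        static BoolQueryBuilder unscoredAnd() {
            return boolQuery()
                    .filter(matchQuery("foo", "bar"))
                    .filter(matchQuery("bar", "baz"));
        }
    }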
@@ -5,9 +5,6 @@
  */
 package org.elasticsearch.xpack.sql.execution.search;

-import org.elasticsearch.index.query.BoolQueryBuilder;
-import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
-import org.elasticsearch.index.query.MatchQueryBuilder;
 import org.elasticsearch.index.query.Operator;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
@ -28,6 +25,8 @@ import org.elasticsearch.xpack.sql.tree.Location;
|
||||
import org.elasticsearch.xpack.sql.type.KeywordEsField;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
|
||||
import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
|
||||
import static org.elasticsearch.search.sort.SortBuilders.scoreSort;
|
||||
|
||||
@ -42,22 +41,22 @@ public class SourceGeneratorTests extends ESTestCase {
|
||||
public void testQueryNoFilter() {
|
||||
QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar"));
|
||||
SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10));
|
||||
assertEquals(new MatchQueryBuilder("foo", "bar").operator(Operator.OR), sourceBuilder.query());
|
||||
assertEquals(matchQuery("foo", "bar").operator(Operator.OR), sourceBuilder.query());
|
||||
}
|
||||
|
||||
public void testNoQueryFilter() {
|
||||
QueryContainer container = new QueryContainer();
|
||||
QueryBuilder filter = new MatchQueryBuilder("bar", "baz");
|
||||
QueryBuilder filter = matchQuery("bar", "baz");
|
||||
SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10));
|
||||
assertEquals(new ConstantScoreQueryBuilder(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query());
|
||||
assertEquals(boolQuery().filter(matchQuery("bar", "baz")), sourceBuilder.query());
|
||||
}
|
||||
|
||||
public void testQueryFilter() {
|
||||
QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar"));
|
||||
QueryBuilder filter = new MatchQueryBuilder("bar", "baz");
|
||||
QueryBuilder filter = matchQuery("bar", "baz");
|
||||
SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10));
|
||||
assertEquals(new BoolQueryBuilder().must(new MatchQueryBuilder("foo", "bar").operator(Operator.OR))
|
||||
.filter(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query());
|
||||
assertEquals(boolQuery().must(matchQuery("foo", "bar").operator(Operator.OR)).filter(matchQuery("bar", "baz")),
|
||||
sourceBuilder.query());
|
||||
}
|
||||
|
||||
public void testLimit() {
|
||||
|
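The rewritten assertions compare whole builder trees, which works because query builders define structural equals/hashCode (via AbstractQueryBuilder), so an expected tree built from the same static factories matches the generated one. A tiny illustration:

    import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
    import static org.elasticsearch.index.query.QueryBuilders.matchQuery;

    public class BuilderEqualitySketch {
        public static void main(String[] args) {
            boolean structurallyEqual = boolQuery().filter(matchQuery("bar", "baz"))
                    .equals(boolQuery().filter(matchQuery("bar", "baz")));
            System.out.println(structurallyEqual); // true: equality is by content, not identity
        }
    }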
x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval

@@ -4,16 +4,7 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/elasticsearch-env
-
-source "`dirname "$0"`"/x-pack-watcher-env
-
-exec \
-  "$JAVA" \
-  $ES_JAVA_OPTS \
-  -Des.path.home="$ES_HOME" \
-  -Des.path.conf="$ES_PATH_CONF" \
-  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-  -cp "$ES_CLASSPATH" \
+ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-watcher-env" \
+"`dirname "$0"`"/elasticsearch-cli \
   org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool \
   "$@"
x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env

@@ -4,7 +4,5 @@
 # or more contributor license agreements. Licensed under the Elastic License;
 # you may not use this file except in compliance with the Elastic License.

-source "`dirname "$0"`"/x-pack-env
-
 # include x-pack-security jars in classpath
 ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-watcher/*"
x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java

@@ -16,7 +16,6 @@ import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.CheckedSupplier;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -33,12 +32,11 @@ import java.nio.charset.StandardCharsets;
 import java.sql.JDBCType;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.TreeMap;

 import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonList;
 import static java.util.Collections.singletonMap;
 import static java.util.Collections.unmodifiableMap;
@@ -396,19 +394,23 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
         assertNotNull(query);

         @SuppressWarnings("unchecked")
-        Map<String, Object> constantScore = (Map<String, Object>) query.get("constant_score");
-        assertNotNull(constantScore);
+        Map<String, Object> bool = (Map<String, Object>) query.get("bool");
+        assertNotNull(bool);

         @SuppressWarnings("unchecked")
-        Map<String, Object> filter = (Map<String, Object>) constantScore.get("filter");
+        List<Object> filter = (List<Object>) bool.get("filter");
         assertNotNull(filter);

         @SuppressWarnings("unchecked")
-        Map<String, Object> match = (Map<String, Object>) filter.get("match");
-        assertNotNull(match);
+        Map<String, Object> map = (Map<String, Object>) filter.get(0);
+        assertNotNull(map);

         @SuppressWarnings("unchecked")
-        Map<String, Object> matchQuery = (Map<String, Object>) match.get("test");
+        Map<String, Object> matchQ = (Map<String, Object>) map.get("match");
+
+        @SuppressWarnings("unchecked")
+        Map<String, Object> matchQuery = (Map<String, Object>) matchQ.get("test");

         assertNotNull(matchQuery);
         assertEquals("foo", matchQuery.get("query"));
     }
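With this change the SQL translate API nests a pushed-down filter as a list under bool.filter instead of under a constant_score wrapper. The assertions above walk a response whose query section looks roughly like this (abbreviated; non-relevant keys elided):

    {
      "query" : {
        "bool" : {
          "filter" : [
            { "match" : { "test" : { "query" : "foo", ... } } }
          ]
        }
      }
    }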