Merge remote-tracking branch 'es/7.x' into enrich-7.x
Commit 102016d571
@@ -7,4 +7,4 @@
ES_BUILD_JAVA=openjdk12
ES_RUNTIME_JAVA=java8
GRADLE_TASK=build
GRADLE_EXTRA_ARGS=--no-parallel
GRADLE_EXTRA_ARGS=
@@ -129,7 +129,6 @@ class BuildPlugin implements Plugin<Project> {
project.getTasks().register("buildResources", ExportElasticsearchBuildResourcesTask)
setupSeed(project)
configureRepositories(project)
project.extensions.getByType(ExtraPropertiesExtension).set('versions', VersionProperties.versions)
configureInputNormalization(project)

@@ -963,32 +962,6 @@ class BuildPlugin implements Plugin<Project> {
}
}

/**
* Pins the test seed at configuration time so it isn't different on every
* {@link Test} execution. This is useful if random
* decisions in one run of {@linkplain Test} influence the
* outcome of subsequent runs. Pinning the seed up front like this makes
* the reproduction line from one run be useful on another run.
*/
static String setupSeed(Project project) {
ExtraPropertiesExtension ext = project.rootProject.extensions.getByType(ExtraPropertiesExtension)
if (ext.has('testSeed')) {
/* Skip this if we've already pinned the testSeed. It is important
* that this checks the rootProject so that we know we've only ever
* initialized one time. */
return ext.get('testSeed')
}

String testSeed = System.getProperty('tests.seed')
if (testSeed == null) {
long seed = new Random(System.currentTimeMillis()).nextLong()
testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT)
}

ext.set('testSeed', testSeed)
return testSeed
}

private static class TestFailureReportingPlugin implements Plugin<Project> {
@Override
void apply(Project project) {
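The setupSeed helper removed above (its job moves to GlobalBuildInfoPlugin further down in this commit) boils down to generating one random 64-bit value, formatting it as upper-case hex, and pinning it once on the root project so every test task reproduces the same run. A minimal standalone sketch of just that generation step, assuming plain Java outside of Gradle and a hypothetical main method for illustration:

import java.util.Locale;
import java.util.Random;

public class TestSeedExample {
    public static void main(String[] args) {
        // Prefer an externally supplied seed so a failing run can be reproduced exactly.
        String testSeed = System.getProperty("tests.seed");
        if (testSeed == null) {
            long seed = new Random(System.currentTimeMillis()).nextLong();
            // Same formatting as the build plugin: unsigned hex, upper-cased.
            testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT);
        }
        System.out.println("tests.seed=" + testSeed);
    }
}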
@@ -23,6 +23,7 @@ import groovy.transform.PackageScope
import org.elasticsearch.gradle.doc.SnippetsTask.Snippet
import org.gradle.api.InvalidUserDataException
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.Internal
import org.gradle.api.tasks.OutputDirectory

import java.nio.file.Files

@@ -58,6 +59,9 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
@OutputDirectory
File testRoot = project.file('build/rest')

@Internal
Set<String> names = new HashSet<>()

RestTestsFromSnippetsTask() {
project.afterEvaluate {
// Wait to set this so testRoot can be customized
@@ -163,7 +163,7 @@ class PluginBuildPlugin implements Plugin<Project> {
private static void configureDependencies(Project project) {
project.dependencies {
if (ClasspathUtils.isElasticsearchProject()) {
if (ClasspathUtils.isElasticsearchProject(project)) {
compileOnly project.project(':server')
testCompile project.project(':test:framework')
} else {
@@ -47,7 +47,7 @@ class PrecommitTasks {
}

Configuration jarHellConfig = project.configurations.create("jarHell")
if (ClasspathUtils.isElasticsearchProject() && project.path.equals(":libs:elasticsearch-core") == false) {
if (ClasspathUtils.isElasticsearchProject(project) && project.path.equals(":libs:elasticsearch-core") == false) {
// External plugins will depend on this already via transitive dependencies.
// Internal projects are not all plugins, so make sure the check is available
// we are not doing this for this project itself to avoid jar hell with itself

@@ -252,7 +252,7 @@ class PrecommitTasks {
}

private static TaskProvider configureLoggerUsage(Project project) {
Object dependency = ClasspathUtils.isElasticsearchProject() ? project.project(':test:logger-usage') :
Object dependency = ClasspathUtils.isElasticsearchProject(project) ? project.project(':test:logger-usage') :
"org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}"

project.configurations.create('loggerUsagePlugin')
@@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.testclusters.ElasticsearchCluster
import org.elasticsearch.gradle.testclusters.RestTestRunnerTask
import org.elasticsearch.gradle.testclusters.TestClustersPlugin
import org.elasticsearch.gradle.tool.Boilerplate
import org.elasticsearch.gradle.tool.ClasspathUtils
import org.gradle.api.DefaultTask

@@ -121,7 +120,7 @@ class RestIntegTestTask extends DefaultTask {
Boilerplate.maybeCreate(project.configurations, 'restSpec') {
project.dependencies.add(
'restSpec',
ClasspathUtils.isElasticsearchProject() ? project.project(':rest-api-spec') :
ClasspathUtils.isElasticsearchProject(project) ? project.project(':rest-api-spec') :
"org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch}"
)
}
@@ -19,6 +19,7 @@
package org.elasticsearch.gradle;

import org.elasticsearch.gradle.info.GlobalBuildInfoPlugin;
import org.gradle.api.Plugin;
import org.gradle.api.Project;

@@ -35,10 +36,12 @@ public class ReaperPlugin implements Plugin<Project> {
throw new IllegalArgumentException("ReaperPlugin can only be applied to the root project of a build");
}

project.getPlugins().apply(GlobalBuildInfoPlugin.class);

Path inputDir = project.getRootDir().toPath().resolve(".gradle")
.resolve("reaper").resolve("build-" + ProcessHandle.current().pid());
ReaperService service = project.getExtensions().create("reaper", ReaperService.class,
project.getLogger(), project.getBuildDir().toPath(), inputDir);
project, project.getBuildDir().toPath(), inputDir);

project.getGradle().buildFinished(result -> service.shutdown());
}
@@ -21,6 +21,7 @@ package org.elasticsearch.gradle;

import org.elasticsearch.gradle.tool.ClasspathUtils;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
import org.gradle.api.logging.Logger;
import org.gradle.internal.jvm.Jvm;

@@ -40,14 +41,16 @@ public class ReaperService {

private static final String REAPER_CLASS = "org/elasticsearch/gradle/reaper/Reaper.class";
private static final Pattern REAPER_JAR_PATH_PATTERN = Pattern.compile("file:(.*)!/" + REAPER_CLASS);
private Logger logger;
private Path buildDir;
private Path inputDir;
private Path logFile;
private final Logger logger;
private final boolean isInternal;
private final Path buildDir;
private final Path inputDir;
private final Path logFile;
private volatile Process reaperProcess;

public ReaperService(Logger logger, Path buildDir, Path inputDir) {
this.logger = logger;
public ReaperService(Project project, Path buildDir, Path inputDir) {
this.logger = project.getLogger();
this.isInternal = ClasspathUtils.isElasticsearchProject(project);
this.buildDir = buildDir;
this.inputDir = inputDir;
this.logFile = inputDir.resolve("reaper.log");

@@ -137,7 +140,7 @@ public class ReaperService {
}

private Path locateReaperJar() {
if (ClasspathUtils.isElasticsearchProject()) {
if (isInternal) {
// when running inside the Elasticsearch build, just find the jar in the runtime classpath
URL main = this.getClass().getClassLoader().getResource(REAPER_CLASS);
String mainPath = main.getFile();
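REAPER_JAR_PATH_PATTERN above pulls the jar path out of the URL of a class that was loaded from inside that jar. A small illustrative sketch of that extraction; the resource URL is a made-up example, not one taken from a real build:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ReaperJarPathExample {
    private static final String REAPER_CLASS = "org/elasticsearch/gradle/reaper/Reaper.class";
    private static final Pattern REAPER_JAR_PATH_PATTERN = Pattern.compile("file:(.*)!/" + REAPER_CLASS);

    public static void main(String[] args) {
        // Hypothetical URL of a class resource that lives inside a jar.
        String resourceUrl = "jar:file:/tmp/build/distributions/buildSrc.jar!/" + REAPER_CLASS;
        Matcher matcher = REAPER_JAR_PATH_PATTERN.matcher(resourceUrl);
        if (matcher.find()) {
            System.out.println("reaper jar: " + matcher.group(1)); // /tmp/build/distributions/buildSrc.jar
        }
    }
}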
@@ -25,7 +25,9 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.stream.Collectors;

public class GlobalBuildInfoPlugin implements Plugin<Project> {

@@ -46,6 +48,15 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
File compilerJavaHome = findCompilerJavaHome();
File runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome);

String testSeedProperty = System.getProperty("tests.seed");
final String testSeed;
if (testSeedProperty == null) {
long seed = new Random(System.currentTimeMillis()).nextLong();
testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT);
} else {
testSeed = testSeedProperty;
}

final List<JavaHome> javaVersions = new ArrayList<>();
for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.getMajorVersion()); version++) {
if (System.getenv(getJavaHomeEnvVarName(Integer.toString(version))) != null) {

@@ -95,7 +106,9 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
ext.set("gradleJavaVersion", Jvm.current().getJavaVersion());
ext.set("gitRevision", gitRevision(project.getRootProject().getRootDir()));
ext.set("buildDate", ZonedDateTime.now(ZoneOffset.UTC));
ext.set("testSeed", testSeed);
ext.set("isCi", System.getenv("JENKINS_URL") != null);
ext.set("isInternal", GlobalBuildInfoPlugin.class.getResource("/buildSrc.marker") != null);
});
}

@@ -265,5 +278,4 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
.findFirst()
.orElseThrow(() -> new IOException("file [" + path + "] is empty"));
}
}
@@ -104,6 +104,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named {
}
}

@Internal
ElasticsearchNode getFirstNode() {
return nodes.getAt(clusterName + "-0");
}
@@ -183,6 +183,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
return distributions.get(currentDistro).getVersion();
}

@Internal
public Path getDistroDir() {
return workingDir.resolve("distro").resolve(getVersion() + "-" + testDistribution);
}
@@ -23,7 +23,7 @@ public class RunTask extends DefaultTestClustersTask {
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
this.debug = debug;
this.debug = enabled;
}

@Input
@@ -1,12 +1,9 @@
package org.elasticsearch.gradle.tool;

public class ClasspathUtils {
private static boolean isElasticsearchProject;
import org.gradle.api.Project;
import org.gradle.api.plugins.ExtraPropertiesExtension;

static {
// look for buildSrc marker file, if it exists then we are running in the context of the elastic/elasticsearch build
isElasticsearchProject = ClasspathUtils.class.getResource("/buildSrc.marker") != null;
}
public class ClasspathUtils {

private ClasspathUtils() {
}

@@ -17,7 +14,8 @@ public class ClasspathUtils {
*
* @return if we are currently running in the `elastic/elasticsearch` project
*/
public static boolean isElasticsearchProject() {
return isElasticsearchProject;
public static boolean isElasticsearchProject(Project project) {
ExtraPropertiesExtension ext = project.getExtensions().getByType(ExtraPropertiesExtension.class);
return (boolean) ext.get("isInternal");
}
}
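The new signature reads the `isInternal` extra property that GlobalBuildInfoPlugin registers (see the hunk above that calls `ext.set("isInternal", ...)`). A hedged sketch of how build logic might branch on it; the class name and the external artifact coordinates are illustrative placeholders, not part of this change:

import org.elasticsearch.gradle.tool.ClasspathUtils;
import org.gradle.api.Project;

public class InternalBuildCheckExample {
    // Internal builds (elastic/elasticsearch itself) use project dependencies,
    // external plugin builds fall back to published artifacts.
    static Object jarHellDependency(Project project) {
        String externalCoordinates = "org.elasticsearch:elasticsearch-core:7.5.0"; // placeholder version
        return ClasspathUtils.isElasticsearchProject(project)
            ? project.project(":libs:elasticsearch-core")
            : externalCoordinates;
    }
}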
@@ -24,8 +24,8 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentParser;

import java.util.Arrays;
import java.util.Collection;

@@ -299,12 +299,14 @@ public final class Role {
public static final String NONE = "none";
public static final String ALL = "all";
public static final String MONITOR = "monitor";
public static final String MONITOR_DATA_FRAME_TRANSFORMS = "monitor_data_frame_transforms";
public static final String MONITOR_TRANSFORM_DEPRECATED = "monitor_data_frame_transforms";
public static final String MONITOR_TRANSFORM = "monitor_transform";
public static final String MONITOR_ML = "monitor_ml";
public static final String MONITOR_WATCHER = "monitor_watcher";
public static final String MONITOR_ROLLUP = "monitor_rollup";
public static final String MANAGE = "manage";
public static final String MANAGE_DATA_FRAME_TRANSFORMS = "manage_data_frame_transforms";
public static final String MANAGE_TRANSFORM_DEPRECATED = "manage_data_frame_transforms";
public static final String MANAGE_TRANSFORM = "manage_transform";
public static final String MANAGE_ML = "manage_ml";
public static final String MANAGE_WATCHER = "manage_watcher";
public static final String MANAGE_ROLLUP = "manage_rollup";

@@ -321,8 +323,8 @@ public final class Role {
public static final String MANAGE_ILM = "manage_ilm";
public static final String READ_ILM = "read_ilm";
public static final String MANAGE_ENRICH = "manage_enrich";
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_DATA_FRAME_TRANSFORMS, MONITOR_ML,
MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_DATA_FRAME_TRANSFORMS,
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_TRANSFORM_DEPRECATED, MONITOR_TRANSFORM,
MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_TRANSFORM_DEPRECATED, MANAGE_TRANSFORM,
MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT,
MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM,
MANAGE_ENRICH };
@@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;

@@ -40,7 +41,6 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.index.reindex.ScrollableHitSource;
import org.elasticsearch.index.reindex.UpdateByQueryAction;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.rest.RestStatus;

@@ -179,10 +179,10 @@ public class ReindexIT extends ESRestHighLevelClientTestCase {
final BulkByScrollResponse response = highLevelClient().reindex(reindexRequest, RequestOptions.DEFAULT);

assertThat(response.getVersionConflicts(), equalTo(2L));
assertThat(response.getBulkFailures(), empty());
assertThat(response.getSearchFailures(), hasSize(2));
assertThat(response.getSearchFailures(), empty());
assertThat(response.getBulkFailures(), hasSize(2));
assertThat(
response.getSearchFailures().stream().map(ScrollableHitSource.SearchFailure::toString).collect(Collectors.toSet()),
response.getBulkFailures().stream().map(BulkItemResponse.Failure::getMessage).collect(Collectors.toSet()),
everyItem(containsString("version conflict"))
);

@@ -328,10 +328,10 @@ public class ReindexIT extends ESRestHighLevelClientTestCase {
final BulkByScrollResponse response = highLevelClient().updateByQuery(updateByQueryRequest, RequestOptions.DEFAULT);

assertThat(response.getVersionConflicts(), equalTo(1L));
assertThat(response.getBulkFailures(), empty());
assertThat(response.getSearchFailures(), hasSize(1));
assertThat(response.getSearchFailures(), empty());
assertThat(response.getBulkFailures(), hasSize(1));
assertThat(
response.getSearchFailures().stream().map(ScrollableHitSource.SearchFailure::toString).collect(Collectors.toSet()),
response.getBulkFailures().stream().map(BulkItemResponse.Failure::getMessage).collect(Collectors.toSet()),
everyItem(containsString("version conflict"))
);
@@ -28,6 +28,7 @@ import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.security.AuthenticateResponse;
import org.elasticsearch.client.security.AuthenticateResponse.RealmInfo;
import org.elasticsearch.client.security.ChangePasswordRequest;
import org.elasticsearch.client.security.ClearRealmCacheRequest;
import org.elasticsearch.client.security.ClearRealmCacheResponse;

@@ -79,7 +80,6 @@ import org.elasticsearch.client.security.PutUserRequest;
import org.elasticsearch.client.security.PutUserResponse;
import org.elasticsearch.client.security.RefreshPolicy;
import org.elasticsearch.client.security.TemplateRoleName;
import org.elasticsearch.client.security.AuthenticateResponse.RealmInfo;
import org.elasticsearch.client.security.support.ApiKey;
import org.elasticsearch.client.security.support.CertificateInfo;
import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression;

@@ -99,8 +99,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
import org.hamcrest.Matchers;

import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;

@@ -120,6 +118,9 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;

import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;

@@ -679,8 +680,8 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {

List<Role> roles = response.getRoles();
assertNotNull(response);
// 27 system roles plus the three we created
assertThat(roles.size(), equalTo(31));
// 29 system roles plus the three we created
assertThat(roles.size(), equalTo(33));
}

{
@@ -21,9 +21,9 @@ POST /twitter/_split/split-twitter-index
[[split-index-api-request]]
==== {api-request-title}

`POST /<index>/_shrink/<target-index>`
`POST /<index>/_split/<target-index>`

`PUT /<index>/_shrink/<target-index>`
`PUT /<index>/_split/<target-index>`

[[split-index-api-prereqs]]
@@ -46,6 +46,26 @@ If the destination index already exists, then it will be used as is. This makes
it possible to set up the destination index in advance with custom settings
and mappings.

[[ml-put-dfanalytics-supported-fields]]
===== Supported fields

====== {oldetection-cap}

{oldetection-cap} requires numeric or boolean data to analyze. The algorithms
don't support missing values, therefore fields that have data types other than
numeric or boolean are ignored. Documents where included fields contain missing
values, null values, or an array are also ignored. Therefore the `dest` index
may contain documents that don't have an {olscore}.

====== {regression-cap}

{regression-cap} supports fields that are numeric, boolean, text, keyword and ip. It
is also tolerant of missing values. Fields that are supported are included in
the analysis, other fields are ignored. Documents where included fields contain
an array with two or more values are also ignored. Documents in the `dest` index
that don’t contain a results field are not included in the {reganalysis}.

[[ml-put-dfanalytics-path-params]]
==== {api-path-parms-title}

@@ -61,19 +81,21 @@ and mappings.
==== {api-request-body-title}

`analysis`::
(Required, object) Defines the type of {dfanalytics} you want to perform on your source
index. For example: `outlier_detection`. See <<dfanalytics-types>>.
(Required, object) Defines the type of {dfanalytics} you want to perform on
your source index. For example: `outlier_detection`. See
<<dfanalytics-types>>.

`analyzed_fields`::
(Optional, object) You can specify both `includes` and/or `excludes` patterns.
If `analyzed_fields` is not set, only the relevant fields will be included.
For example, all the numeric fields for {oldetection}.
For example, all the numeric fields for {oldetection}. For the supported field
types, see <<ml-put-dfanalytics-supported-fields>>.

`analyzed_fields.includes`:::
`includes`:::
(Optional, array) An array of strings that defines the fields that will be
included in the analysis.

`analyzed_fields.excludes`:::
`excludes`:::
(Optional, array) An array of strings that defines the fields that will be
excluded from the analysis.
@@ -4,11 +4,11 @@
Starting an Elasticsearch cluster for the very first time requires the initial
set of <<master-node,master-eligible nodes>> to be explicitly defined on one or
more of the master-eligible nodes in the cluster. This is known as _cluster
bootstrapping_. This is only required the very first time the cluster starts
up: nodes that have already joined a cluster store this information in their
data folder for use in a <<restart-upgrade,full cluster restart>>, and
freshly-started nodes that are joining a running cluster obtain this
information from the cluster's elected master.
bootstrapping_. This is only required the first time a cluster starts up: nodes
that have already joined a cluster store this information in their data folder
for use in a <<restart-upgrade,full cluster restart>>, and freshly-started nodes
that are joining a running cluster obtain this information from the cluster's
elected master.

The initial set of master-eligible nodes is defined in the
<<initial_master_nodes,`cluster.initial_master_nodes` setting>>. This should be

@@ -30,12 +30,8 @@ node:

When you start a master-eligible node, you can provide this setting on the
command line or in the `elasticsearch.yml` file. After the cluster has formed,
this setting is no longer required and is ignored. It need not be set on
master-ineligible nodes, nor on master-eligible nodes that are started to join
an existing cluster. Note that master-eligible nodes should use storage that
persists across restarts. If they do not, and `cluster.initial_master_nodes` is
set, and a full cluster restart occurs, then another brand-new cluster will
form and this may result in data loss.
this setting is no longer required. It should not be set for master-ineligible
nodes, master-eligible nodes joining an existing cluster, or cluster restarts.

It is technically sufficient to set `cluster.initial_master_nodes` on a single
master-eligible node in the cluster, and only to mention that single node in the
@@ -587,8 +587,9 @@ end::scroll_size[]

tag::search_timeout[]
`search_timeout`::
(Optional, <<time-units, time units>> Explicit timeout for each search
request. Defaults to no timeout.
(Optional, <<time-units, time units>>)
Explicit timeout for each search request.
Defaults to no timeout.
end::search_timeout[]

tag::search_type[]
@@ -73,7 +73,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=search_type]

include::{docdir}/rest-api/common-parms.asciidoc[tag=terminate_after]

include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=search_timeout]

Out of the above, the `search_type`, `request_cache` and the
@@ -41,7 +41,8 @@ themselves. As this auto-bootstrapping is <<modules-discovery-quorums,inherently
unsafe>>, when you start a brand new cluster in <<dev-vs-prod-mode,production
mode>>, you must explicitly list the master-eligible nodes whose votes should be
counted in the very first election. This list is set using the
`cluster.initial_master_nodes` setting.
`cluster.initial_master_nodes` setting. You should not use this setting when
restarting a cluster or adding a new node to an existing cluster.

[source,yaml]
--------------------------------------------------
@@ -86,6 +86,16 @@ the multiple of a day. E.g.: for `HISTOGRAM(CAST(birth_date AS DATE), INTERVAL '
actually used will be `INTERVAL '2' DAY`. If the interval specified is less than 1 day, e.g.:
`HISTOGRAM(CAST(birth_date AS DATE), INTERVAL '20' HOUR)` then the interval used will be `INTERVAL '1' DAY`.

[IMPORTANT]
All intervals specified for a date/time HISTOGRAM will use a <<search-aggregations-bucket-datehistogram-aggregation,fixed interval>>
in their `date_histogram` aggregation definition, with the notable exception of `INTERVAL '1' YEAR` where a calendar interval is used.
The choice of a calendar interval was made to give a more intuitive result for YEAR groupings. Calendar intervals consider a one year
bucket as the one starting on January 1st that specific year, whereas a fixed interval one-year-bucket considers one year as a number
of milliseconds (for example, `31536000000ms` corresponding to 365 days, 24 hours per day, 60 minutes per hour etc.). With fixed intervals,
the day of February 5th, 2019 for example, belongs to a bucket that starts on December 20th, 2018 and {es} (and implicitly {es-sql}) would
have returned the year 2018 for a date that's actually in 2019. With a calendar interval this behavior is more intuitive, with the day of
February 5th, 2019 actually belonging to the 2019 year bucket.

[IMPORTANT]
Histogram in SQL cannot be applied on the **TIME** type.
E.g.: `HISTOGRAM(CAST(birth_date AS TIME), INTERVAL '10' MINUTES)` is currently not supported.
@@ -28,8 +28,6 @@ import java.util.Map;
*/
public class Globals {
private final Map<String,Constant> constantInitializers = new HashMap<>();
private final Map<String,Class<?>> classBindings = new HashMap<>();
private final Map<Object,String> instanceBindings = new HashMap<>();
private final BitSet statements;

/** Create a new Globals from the set of statement boundaries */

@@ -44,34 +42,11 @@ public class Globals {
}
}

/** Adds a new class binding to be written as a local variable */
public String addClassBinding(Class<?> type) {
String name = "$class_binding$" + classBindings.size();
classBindings.put(name, type);

return name;
}

/** Adds a new binding to be written as a local variable */
public String addInstanceBinding(Object instance) {
return instanceBindings.computeIfAbsent(instance, key -> "$instance_binding$" + instanceBindings.size());
}

/** Returns the current initializers */
public Map<String,Constant> getConstantInitializers() {
return constantInitializers;
}

/** Returns the current bindings */
public Map<String,Class<?>> getClassBindings() {
return classBindings;
}

/** Returns the current bindings */
public Map<Object,String> getInstanceBindings() {
return instanceBindings;
}

/** Returns the set of statement boundaries */
public BitSet getStatements() {
return statements;
@@ -40,6 +40,9 @@ import java.util.Objects;
import java.util.Set;

import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE;
import static org.objectweb.asm.Opcodes.ACC_PRIVATE;
import static org.objectweb.asm.Opcodes.ACC_PUBLIC;
import static org.objectweb.asm.Opcodes.ACC_STATIC;

/**
* Represents a user-defined call.

@@ -54,6 +57,7 @@ public final class ECallLocal extends AExpression {
private PainlessClassBinding classBinding = null;
private int classBindingOffset = 0;
private PainlessInstanceBinding instanceBinding = null;
private String bindingName = null;

public ECallLocal(Location location, String name, List<AExpression> arguments) {
super(location);

@@ -138,9 +142,15 @@ public final class ECallLocal extends AExpression {
} else if (classBinding != null) {
typeParameters = new ArrayList<>(classBinding.typeParameters);
actual = classBinding.returnType;
bindingName = scriptRoot.getNextSyntheticName("class_binding");
scriptRoot.getClassNode().addField(new SField(location,
ACC_PRIVATE, bindingName, classBinding.javaConstructor.getDeclaringClass(), null));
} else if (instanceBinding != null) {
typeParameters = new ArrayList<>(instanceBinding.typeParameters);
actual = instanceBinding.returnType;
bindingName = scriptRoot.getNextSyntheticName("instance_binding");
scriptRoot.getClassNode().addField(new SField(location,
ACC_STATIC | ACC_PUBLIC, bindingName, instanceBinding.targetInstance.getClass(), instanceBinding.targetInstance));
} else {
throw new IllegalStateException("Illegal tree structure.");
}

@@ -178,14 +188,13 @@ public final class ECallLocal extends AExpression {
methodWriter.invokeStatic(Type.getType(importedMethod.targetClass),
new Method(importedMethod.javaMethod.getName(), importedMethod.methodType.toMethodDescriptorString()));
} else if (classBinding != null) {
String name = globals.addClassBinding(classBinding.javaConstructor.getDeclaringClass());
Type type = Type.getType(classBinding.javaConstructor.getDeclaringClass());
int javaConstructorParameterCount = classBinding.javaConstructor.getParameterCount() - classBindingOffset;

Label nonNull = new Label();

methodWriter.loadThis();
methodWriter.getField(CLASS_TYPE, name, type);
methodWriter.getField(CLASS_TYPE, bindingName, type);
methodWriter.ifNonNull(nonNull);
methodWriter.loadThis();
methodWriter.newInstance(type);

@@ -200,11 +209,11 @@ public final class ECallLocal extends AExpression {
}

methodWriter.invokeConstructor(type, Method.getMethod(classBinding.javaConstructor));
methodWriter.putField(CLASS_TYPE, name, type);
methodWriter.putField(CLASS_TYPE, bindingName, type);

methodWriter.mark(nonNull);
methodWriter.loadThis();
methodWriter.getField(CLASS_TYPE, name, type);
methodWriter.getField(CLASS_TYPE, bindingName, type);

for (int argument = 0; argument < classBinding.javaMethod.getParameterCount(); ++argument) {
arguments.get(argument + javaConstructorParameterCount).write(classWriter, methodWriter, globals);

@@ -212,11 +221,10 @@ public final class ECallLocal extends AExpression {

methodWriter.invokeVirtual(type, Method.getMethod(classBinding.javaMethod));
} else if (instanceBinding != null) {
String name = globals.addInstanceBinding(instanceBinding.targetInstance);
Type type = Type.getType(instanceBinding.targetInstance.getClass());

methodWriter.loadThis();
methodWriter.getStatic(CLASS_TYPE, name, type);
methodWriter.getStatic(CLASS_TYPE, bindingName, type);

for (int argument = 0; argument < instanceBinding.javaMethod.getParameterCount(); ++argument) {
arguments.get(argument).write(classWriter, methodWriter, globals);
@@ -83,6 +83,7 @@ public final class SClass extends AStatement {
private final String name;
private final Printer debugStream;
private final List<SFunction> functions = new ArrayList<>();
private final List<SField> fields = new ArrayList<>();
private final Globals globals;
private final List<AStatement> statements;

@@ -112,6 +113,10 @@ public final class SClass extends AStatement {
functions.add(function);
}

void addField(SField field) {
fields.add(field);
}

@Override
public void storeSettings(CompilerSettings settings) {
for (SFunction function : functions) {

@@ -289,6 +294,11 @@ public final class SClass extends AStatement {
function.write(classWriter, globals);
}

// Write all fields:
for (SField field : fields) {
field.write(classWriter);
}

// Write the constants
if (false == globals.getConstantInitializers().isEmpty()) {
Collection<Constant> inits = globals.getConstantInitializers().values();

@@ -315,20 +325,6 @@ public final class SClass extends AStatement {
clinit.endMethod();
}

// Write class binding variables
for (Map.Entry<String, Class<?>> classBinding : globals.getClassBindings().entrySet()) {
String name = classBinding.getKey();
String descriptor = Type.getType(classBinding.getValue()).getDescriptor();
classVisitor.visitField(Opcodes.ACC_PRIVATE, name, descriptor, null, null).visitEnd();
}

// Write instance binding variables
for (Map.Entry<Object, String> instanceBinding : globals.getInstanceBindings().entrySet()) {
String name = instanceBinding.getValue();
String descriptor = Type.getType(instanceBinding.getKey().getClass()).getDescriptor();
classVisitor.visitField(Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC, name, descriptor, null, null).visitEnd();
}

// Write any needsVarName methods for used variables
for (org.objectweb.asm.commons.Method needsMethod : scriptClassInfo.getNeedsMethods()) {
String name = needsMethod.getName();

@@ -349,8 +345,10 @@ public final class SClass extends AStatement {
Map<String, Object> statics = new HashMap<>();
statics.put("$FUNCTIONS", table.getFunctionTable());

for (Map.Entry<Object, String> instanceBinding : globals.getInstanceBindings().entrySet()) {
statics.put(instanceBinding.getValue(), instanceBinding.getKey());
for (SField field : fields) {
if (field.getInstance() != null) {
statics.put(field.getName(), field.getInstance());
}
}

return statics;
@@ -0,0 +1,96 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.painless.node;

import org.elasticsearch.painless.ClassWriter;
import org.elasticsearch.painless.CompilerSettings;
import org.elasticsearch.painless.Globals;
import org.elasticsearch.painless.Locals;
import org.elasticsearch.painless.Location;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.ScriptRoot;
import org.objectweb.asm.Type;

import java.util.Set;

/**
* Represents a member field for its parent class (internal only).
*/
public class SField extends ANode {

private final int access;
private final String name;
private final Class<?> type;
private final Object instance;

/**
* Standard constructor.
* @param location original location in the source
* @param access asm constants for field modifiers
* @param name name of the field
* @param type type of the field
* @param instance initial value for the field
*/
public SField(Location location, int access, String name, Class<?> type, Object instance) {
super(location);

this.access = access;
this.name = name;
this.type = type;
this.instance = instance;
}

public String getName() {
return name;
}

public Object getInstance() {
return instance;
}

@Override
void storeSettings(CompilerSettings settings) {
throw createError(new UnsupportedOperationException("unexpected node"));
}

@Override
void extractVariables(Set<String> variables) {
throw createError(new UnsupportedOperationException("unexpected node"));
}

@Override
void analyze(ScriptRoot scriptRoot, Locals locals) {
throw createError(new UnsupportedOperationException("unexpected node"));
}

@Override
void write(ClassWriter classWriter, MethodWriter methodWriter, Globals globals) {
throw createError(new UnsupportedOperationException("unexpected node"));
}

void write(ClassWriter classWriter) {
classWriter.getClassVisitor().visitField(access, name, Type.getType(type).getDescriptor(), null, null).visitEnd();
}

@Override
public String toString() {
return singleLineToString(name, type);
}
}
@@ -32,7 +32,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;

@@ -40,7 +40,6 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase;
import org.elasticsearch.search.fetch.subphase.highlight.Highlighter;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SubSearchContext;

import java.io.IOException;
import java.util.ArrayList;

@@ -100,15 +99,19 @@ final class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
for (Object matchedSlot : field.getValues()) {
int slot = (int) matchedSlot;
BytesReference document = percolateQuery.getDocuments().get(slot);
SubSearchContext subSearchContext =
createSubSearchContext(context, percolatorLeafReaderContext, document, slot);
subSearchContext.parsedQuery(new ParsedQuery(query));
SearchContextHighlight highlight = new SearchContextHighlight(context.highlight().fields());
// Enforce highlighting by source, because MemoryIndex doesn't support stored fields.
highlight.globalForceSource(true);
QueryShardContext shardContext = new QueryShardContext(context.getQueryShardContext());
shardContext.freezeContext();
shardContext.lookup().source().setSegmentAndDocument(percolatorLeafReaderContext, slot);
shardContext.lookup().source().setSource(document);
hitContext.reset(
new SearchHit(slot, "unknown", new Text(hit.getType()), Collections.emptyMap()),
percolatorLeafReaderContext, slot, percolatorIndexSearcher
);
hitContext.cache().clear();
highlightPhase.hitExecute(subSearchContext, hitContext);
highlightPhase.hitExecute(context.shardTarget(), shardContext, query, highlight, hitContext);
for (Map.Entry<String, HighlightField> entry : hitContext.hit().getHighlightFields().entrySet()) {
if (percolateQuery.getDocuments().size() == 1) {
String hlFieldName;

@@ -166,15 +169,4 @@ final class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
}
return Collections.emptyList();
}

private SubSearchContext createSubSearchContext(SearchContext context, LeafReaderContext leafReaderContext,
BytesReference source, int docId) {
SubSearchContext subSearchContext = new SubSearchContext(context);
subSearchContext.highlight(new SearchContextHighlight(context.highlight().fields()));
// Enforce highlighting by source, because MemoryIndex doesn't support stored fields.
subSearchContext.highlight().globalForceSource(true);
subSearchContext.lookup().source().setSegmentAndDocument(leafReaderContext, docId);
subSearchContext.lookup().source().setSource(source);
return subSearchContext;
}
}
@@ -19,11 +19,10 @@

package org.elasticsearch.index.reindex;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestResponse;

@@ -67,7 +66,7 @@ public class BulkIndexByScrollResponseContentListener extends RestBuilderListene
}
}
for (SearchFailure failure: response.getSearchFailures()) {
RestStatus failureStatus = ExceptionsHelper.status(failure.getReason());
RestStatus failureStatus = failure.getStatus();
if (failureStatus.getStatus() > status.getStatus()) {
status = failureStatus;
}
@@ -26,9 +26,9 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.Field;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.ArrayList;

@@ -45,9 +45,12 @@ public class AnnotatedTextHighlighter extends UnifiedHighlighter {

// Convert the marked-up values held on-disk to plain-text versions for highlighting
@Override
protected List<Object> loadFieldValues(MappedFieldType fieldType, Field field, SearchContext context, HitContext hitContext)
throws IOException {
List<Object> fieldValues = super.loadFieldValues(fieldType, field, context, hitContext);
protected List<Object> loadFieldValues(MappedFieldType fieldType,
Field field,
QueryShardContext context,
HitContext hitContext,
boolean forceSource) throws IOException {
List<Object> fieldValues = super.loadFieldValues(fieldType, field, context, hitContext, forceSource);
String[] fieldValuesAsString = fieldValues.toArray(new String[fieldValues.size()]);

AnnotatedText[] annotations = new AnnotatedText[fieldValuesAsString.length];
@@ -147,13 +147,7 @@ public class AzureBlobContainer extends AbstractBlobContainer {
// Executing deletes in parallel since Azure SDK 8 is using blocking IO while Azure does not provide a bulk delete API endpoint
// TODO: Upgrade to newer non-blocking Azure SDK 11 and execute delete requests in parallel that way.
for (String blobName : blobNames) {
executor.execute(new ActionRunnable<Void>(listener) {
@Override
protected void doRun() throws IOException {
deleteBlobIgnoringIfNotExists(blobName);
listener.onResponse(null);
}
});
executor.execute(ActionRunnable.run(listener, () -> deleteBlobIgnoringIfNotExists(blobName)));
}
}
try {
@@ -20,6 +20,8 @@
package org.elasticsearch.action;

import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.CheckedRunnable;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

/**

@@ -30,6 +32,32 @@ public abstract class ActionRunnable<Response> extends AbstractRunnable {

protected final ActionListener<Response> listener;

/**
* Creates a {@link Runnable} that invokes the given listener with {@code null} after the given runnable has executed.
* @param listener Listener to invoke
* @param runnable Runnable to execute
* @return Wrapped {@code Runnable}
*/
public static <T> ActionRunnable<T> run(ActionListener<T> listener, CheckedRunnable<Exception> runnable) {
return new ActionRunnable<T>(listener) {
@Override
protected void doRun() throws Exception {
runnable.run();
listener.onResponse(null);
}
};
}

/**
* Creates a {@link Runnable} that invokes the given listener with the return of the given supplier.
* @param listener Listener to invoke
* @param supplier Supplier that provides value to pass to listener
* @return Wrapped {@code Runnable}
*/
public static <T> ActionRunnable<T> supply(ActionListener<T> listener, CheckedSupplier<T, Exception> supplier) {
return ActionRunnable.wrap(listener, l -> l.onResponse(supplier.get()));
}

/**
* Creates a {@link Runnable} that wraps the given listener and a consumer of it that is executed when the {@link Runnable} is run.
* Invokes {@link ActionListener#onFailure(Exception)} on it if an exception is thrown on executing the consumer.
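The two new factories cover the common cases of "run something, then complete the listener with null" and "compute a value and hand it to the listener". A hedged usage sketch drawn from the call sites changed elsewhere in this commit; the executor, threadPool, listener, request, shardId and blobName variables are assumed to already exist in the caller:

// Fire-and-forget style: run the checked runnable, then complete the listener with null.
executor.execute(ActionRunnable.run(listener, () -> deleteBlobIgnoringIfNotExists(blobName)));

// Value-producing style: compute a response on another thread and pass it to the listener.
threadPool.executor(getExecutor(request, shardId))
    .execute(ActionRunnable.supply(listener, () -> shardOperation(request, shardId)));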
@@ -121,8 +121,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
.snapshots(snapshots).timeout(request.masterNodeTimeout()),
ActionListener.wrap(
nodeSnapshotStatuses -> threadPool.executor(ThreadPool.Names.GENERIC).execute(
ActionRunnable.wrap(listener, l -> l.onResponse(buildResponse(request, snapshotsService.currentSnapshots(
request.repository(), Arrays.asList(request.snapshots())), nodeSnapshotStatuses)))), listener::onFailure));
ActionRunnable.supply(listener, () -> buildResponse(request, snapshotsService.currentSnapshots(
request.repository(), Arrays.asList(request.snapshots())), nodeSnapshotStatuses))), listener::onFailure));
} else {
// We don't have any in-progress shards, just return current stats
listener.onResponse(buildResponse(request, currentSnapshots, null));
@@ -352,7 +352,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
}
}

private ShardSearchFailure[] buildShardFailures() {
ShardSearchFailure[] buildShardFailures() {
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
if (shardFailures == null) {
return ShardSearchFailure.EMPTY_ARRAY;

@@ -510,20 +510,23 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
return request;
}

protected final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
ShardSearchFailure[] failures = buildShardFailures();
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && failures.length > 0){
raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures));
}
protected final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse,
String scrollId,
ShardSearchFailure[] failures) {
return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(),
skippedOps.get(), buildTookInMillis(), failures, clusters);
}

@Override
public void sendSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
listener.onResponse(buildSearchResponse(internalSearchResponse, scrollId));
ShardSearchFailure[] failures = buildShardFailures();
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && failures.length > 0){
raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures));
} else {
listener.onResponse(buildSearchResponse(internalSearchResponse, scrollId, failures));
}
}

@Override
@@ -300,6 +300,6 @@ public abstract class TransportBroadcastAction<

private void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) {
transportService.getThreadPool().executor(shardExecutor)
.execute(ActionRunnable.wrap(listener, l -> l.onResponse(shardOperation(request, task))));
.execute(ActionRunnable.supply(listener, () -> shardOperation(request, task)));
}
}
@@ -107,7 +107,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

protected void asyncShardOperation(Request request, ShardId shardId, ActionListener<Response> listener) throws IOException {
threadPool.executor(getExecutor(request, shardId))
.execute(ActionRunnable.wrap(listener, l -> l.onResponse((shardOperation(request, shardId)))));
.execute(ActionRunnable.supply(listener, () -> shardOperation(request, shardId)));
}

protected abstract Writeable.Reader<Response> getResponseReader();
@@ -337,7 +337,7 @@ public class GeoUtils {
}
}

private static double centeredModulus(double dividend, double divisor) {
public static double centeredModulus(double dividend, double divisor) {
double rtn = dividend % divisor;
if (rtn <= 0) {
rtn += divisor;
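Making centeredModulus public lets GeoShapeIndexer reuse it for the anti-meridian shift below. A quick worked sketch of what it computes for longitudes; the clamp back below +divisor/2 is an assumption based on the method's name, since the hunk above is cut off before the end of the method body:

public class CenteredModulusExample {
    // Maps dividend into a range centered on zero, e.g. a longitude into (-180, 180].
    static double centeredModulus(double dividend, double divisor) {
        double rtn = dividend % divisor;
        if (rtn <= 0) {
            rtn += divisor;
        }
        if (rtn > divisor / 2) { // assumed continuation of the truncated hunk
            rtn -= divisor;
        }
        return rtn;
    }

    public static void main(String[] args) {
        System.out.println(centeredModulus(190.0, 360.0));  // -170.0: longitude 190 wraps to -170
        System.out.println(centeredModulus(-200.0, 360.0)); // 160.0
        System.out.println(centeredModulus(45.0, 360.0));   // 45.0
    }
}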
@ -23,6 +23,7 @@ package org.elasticsearch.index.mapper;
|
|||
import org.apache.lucene.document.LatLonShape;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.geo.GeoUtils;
|
||||
import org.elasticsearch.geometry.Circle;
|
||||
import org.elasticsearch.geometry.Geometry;
|
||||
import org.elasticsearch.geometry.GeometryCollection;
|
||||
|
@ -50,6 +51,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
|
|||
import static org.apache.lucene.geo.GeoUtils.orient;
|
||||
import static org.elasticsearch.common.geo.GeoUtils.normalizeLat;
|
||||
import static org.elasticsearch.common.geo.GeoUtils.normalizeLon;
|
||||
import static org.elasticsearch.common.geo.GeoUtils.normalizePoint;
|
||||
|
||||
/**
|
||||
* Utility class that converts geometries into Lucene-compatible form
|
||||
|
@ -160,8 +162,9 @@ public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexe
|
|||
|
||||
@Override
|
||||
public Geometry visit(Point point) {
|
||||
//TODO: Just remove altitude for now. We need to add normalization later
|
||||
return new Point(point.getX(), point.getY());
|
||||
double[] latlon = new double[]{point.getX(), point.getY()};
|
||||
normalizePoint(latlon);
|
||||
return new Point(latlon[0], latlon[1]);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -222,88 +225,113 @@ public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexe
* Splits the specified line by datelines and adds them to the supplied lines array
*/
private List<Line> decomposeGeometry(Line line, List<Line> lines) {

for (Line partPlus : decompose(+DATELINE, line)) {
for (Line partMinus : decompose(-DATELINE, partPlus)) {
double[] lats = new double[partMinus.length()];
double[] lons = new double[partMinus.length()];
for (int i = 0; i < partMinus.length(); i++) {
lats[i] = normalizeLat(partMinus.getY(i));
lons[i] = normalizeLonMinus180Inclusive(partMinus.getX(i));
for (Line part : decompose(line)) {
double[] lats = new double[part.length()];
double[] lons = new double[part.length()];
for (int i = 0; i < part.length(); i++) {
lats[i] = normalizeLat(part.getY(i));
lons[i] = normalizeLonMinus180Inclusive(part.getX(i));
}
lines.add(new Line(lons, lats));
}
}
return lines;
}

/**
* Decompose a linestring given as array of coordinates at a vertical line.
*
* @param dateline x-axis intercept of the vertical line
* @param line linestring that should be decomposed
* @return array of linestrings given as coordinate arrays
* Calculates how many degrees the given longitude needs to be moved east in order to be within -180 to +180. +180 is inclusive only
* if include180 is true.
*/
private List<Line> decompose(double dateline, Line line) {
double[] lons = line.getX();
double[] lats = line.getY();
return decompose(dateline, lons, lats);
double calculateShift(double lon, boolean include180) {
double normalized = GeoUtils.centeredModulus(lon, 360);
double shift = Math.round(normalized - lon);
if (!include180 && normalized == 180.0) {
shift = shift - 360;
}
return shift;
}

/**
* Decompose a linestring given as two arrays of coordinates at a vertical line.
* Decompose a linestring given as an array of coordinates at the anti-meridian.
*
* @param line linestring that should be decomposed
* @return array of linestrings given as coordinate arrays
*/
private List<Line> decompose(double dateline, double[] lons, double[] lats) {
private List<Line> decompose(Line line) {
double[] lons = line.getX();
double[] lats = line.getY();
int offset = 0;
ArrayList<Line> parts = new ArrayList<>();

double lastLon = lons[0];
double shift = lastLon > DATELINE ? DATELINE : (lastLon < -DATELINE ? -DATELINE : 0);

for (int i = 1; i < lons.length; i++) {
double t = intersection(lastLon, lons[i], dateline);
lastLon = lons[i];
double shift = 0;
int i = 1;
while (i < lons.length) {
// Check where the line is going east (+1), west (-1) or directly north/south (0)
int direction = Double.compare(lons[i], lons[i - 1]);
double newShift = calculateShift(lons[i - 1], direction < 0);
// first point lon + shift is always between -180.0 and +180.0
if (i - offset > 1 && newShift != shift) {
// Jumping over anti-meridian - we need to start a new segment
double[] partLons = Arrays.copyOfRange(lons, offset, i);
double[] partLats = Arrays.copyOfRange(lats, offset, i);
performShift(shift, partLons);
shift = newShift;
offset = i - 1;
parts.add(new Line(partLons, partLats));
} else {
// Check if new point intersects with anti-meridian
shift = newShift;
double t = intersection(lons[i - 1] + shift, lons[i] + shift);
if (Double.isNaN(t) == false) {
// Found intersection, all previous segments are now part of the linestring
double[] partLons = Arrays.copyOfRange(lons, offset, i + 1);
double[] partLats = Arrays.copyOfRange(lats, offset, i + 1);
if (t < 1) {
Point intersection = position(new Point(lons[i - 1], lats[i - 1]), new Point(lons[i], lats[i]), t);
partLons[partLons.length - 1] = intersection.getX();
partLats[partLats.length - 1] = intersection.getY();

lons[offset + i - 1] = intersection.getX();
lats[offset + i - 1] = intersection.getY();

shift(shift, partLons);
lons[i - 1] = partLons[partLons.length - 1] = (direction > 0 ? DATELINE : -DATELINE) - shift;
lats[i - 1] = partLats[partLats.length - 1] = lats[i - 1] + (lats[i] - lats[i - 1]) * t;
performShift(shift, partLons);
offset = i - 1;
shift = lons[i] > DATELINE ? DATELINE : (lons[i] < -DATELINE ? -DATELINE : 0);
} else {
shift(shift, partLons);
offset = i;
}
parts.add(new Line(partLons, partLats));
} else {
// Didn't find intersection - just continue checking
i++;
}
}
}

if (offset == 0) {
shift(shift, lons);
performShift(shift, lons);
parts.add(new Line(lons, lats));
} else if (offset < lons.length - 1) {
double[] partLons = Arrays.copyOfRange(lons, offset, lons.length);
double[] partLats = Arrays.copyOfRange(lats, offset, lats.length);
shift(shift, partLons);
performShift(shift, partLons);
parts.add(new Line(partLons, partLats));
}
return parts;
}

/**
* shifts all coordinates by (- shift * 2)
* Checks if the segment from p1x to p2x intersects the anti-meridian
* p1x must be within the -180 to +180 range
*/
private static void shift(double shift, double[] lons) {
private static double intersection(double p1x, double p2x) {
if (p1x == p2x) {
return Double.NaN;
}
final double t = ((p1x < p2x ? DATELINE : -DATELINE) - p1x) / (p2x - p1x);
if (t >= 1 || t <= 0) {
return Double.NaN;
} else {
return t;
}
}

/**
* shifts all coordinates by shift
*/
private static void performShift(double shift, double[] lons) {
if (shift != 0) {
for (int j = 0; j < lons.length; j++) {
lons[j] = lons[j] - 2 * shift;
lons[j] = lons[j] + shift;
}
}
}
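[Editor's note - illustrative sketch, not part of the commit: the calculateShift logic above moves a longitude into the [-180, 180] range by a multiple of 360. A minimal standalone Java version, assuming GeoUtils.centeredModulus behaves as a centered modulo returning a value in (-modulus/2, modulus/2], could look like this:]

// Sketch only: mirrors calculateShift above under the stated assumption about centeredModulus.
final class LongitudeShiftSketch {
    // assumed behaviour of GeoUtils.centeredModulus: result lies in (-modulus/2, modulus/2]
    static double centeredModulus(double value, double modulus) {
        double m = value % modulus;
        if (m <= -modulus / 2) {
            m += modulus;
        } else if (m > modulus / 2) {
            m -= modulus;
        }
        return m;
    }

    static double calculateShift(double lon, boolean include180) {
        double normalized = centeredModulus(lon, 360);
        double shift = Math.round(normalized - lon);      // multiple of 360 that re-centers lon
        if (include180 == false && normalized == 180.0) {
            shift = shift - 360;                          // map +180 to -180 when 180 is excluded
        }
        return shift;
    }

    public static void main(String[] args) {
        System.out.println(calculateShift(190, true));    // prints -360.0: 190 + (-360) = -170
        System.out.println(calculateShift(180, false));   // prints -360.0: 180 + (-360) = -180
    }
}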
@ -241,10 +241,10 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr
} else if (token == Token.START_OBJECT) {
switch (name) {
case SearchFailure.REASON_FIELD:
bulkExc = ElasticsearchException.fromXContent(parser);
searchExc = ElasticsearchException.fromXContent(parser);
break;
case Failure.CAUSE_FIELD:
searchExc = ElasticsearchException.fromXContent(parser);
bulkExc = ElasticsearchException.fromXContent(parser);
break;
default:
parser.skipChildren();

@ -285,7 +285,11 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr
if (bulkExc != null) {
return new Failure(index, type, id, bulkExc, RestStatus.fromCode(status));
} else if (searchExc != null) {
if (status == null) {
return new SearchFailure(searchExc, index, shardId, nodeId);
} else {
return new SearchFailure(searchExc, index, shardId, nodeId, RestStatus.fromCode(status));
}
} else {
throw new ElasticsearchParseException("failed to parse failures array. At least one of {reason,cause} must be present");
}
@ -21,8 +21,10 @@ package org.elasticsearch.index.reindex;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;

@ -35,6 +37,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;

@ -356,6 +359,7 @@ public abstract class ScrollableHitSource {
*/
public static class SearchFailure implements Writeable, ToXContentObject {
private final Throwable reason;
private final RestStatus status;
@Nullable
private final String index;
@Nullable

@ -367,12 +371,19 @@ public abstract class ScrollableHitSource {
public static final String SHARD_FIELD = "shard";
public static final String NODE_FIELD = "node";
public static final String REASON_FIELD = "reason";
public static final String STATUS_FIELD = BulkItemResponse.Failure.STATUS_FIELD;

public SearchFailure(Throwable reason, @Nullable String index, @Nullable Integer shardId, @Nullable String nodeId) {
this(reason, index, shardId, nodeId, ExceptionsHelper.status(reason));
}

public SearchFailure(Throwable reason, @Nullable String index, @Nullable Integer shardId, @Nullable String nodeId,
RestStatus status) {
this.index = index;
this.shardId = shardId;
this.reason = requireNonNull(reason, "reason cannot be null");
this.nodeId = nodeId;
this.status = status;
}

/**

@ -390,6 +401,7 @@ public abstract class ScrollableHitSource {
index = in.readOptionalString();
shardId = in.readOptionalVInt();
nodeId = in.readOptionalString();
status = ExceptionsHelper.status(reason);
}

@Override

@ -408,6 +420,10 @@ public abstract class ScrollableHitSource {
return shardId;
}

public RestStatus getStatus() {
return this.status;
}

public Throwable getReason() {
return reason;
}

@ -429,6 +445,7 @@ public abstract class ScrollableHitSource {
if (nodeId != null) {
builder.field(NODE_FIELD, nodeId);
}
builder.field(STATUS_FIELD, status.getStatus());
builder.field(REASON_FIELD);
{
builder.startObject();
@ -1678,6 +1678,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* required generation
*/
public void trimUnreferencedReaders() throws IOException {
// move most of the data to disk to reduce the time the lock is held
sync();
try (ReleasableLock ignored = writeLock.acquire()) {
if (closed.get()) {
// we're shutdown potentially on some tragic event, don't delete anything

@ -1705,6 +1707,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
// We now update the checkpoint to ignore the file we are going to remove.
// Note that there is a provision in recoverFromFiles to allow for the case where we synced the checkpoint
// but crashed before we could delete the file.
// sync at once to make sure that there's at most one unreferenced generation.
current.sync();
deleteReaderFiles(reader);
}
@ -409,10 +409,7 @@ public class RecoverySourceHandler {
// TODO: We shouldn't use the generic thread pool here as we already execute this from the generic pool.
// While practically unlikely at a min pool size of 128 we could technically block the whole pool by waiting on futures
// below and thus make it impossible for the store release to execute which in turn would block the futures forever
threadPool.generic().execute(ActionRunnable.wrap(future, l -> {
store.decRef();
l.onResponse(null);
}));
threadPool.generic().execute(ActionRunnable.run(future, store::decRef));
FutureUtils.get(future);
});
}
@ -490,14 +490,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}, listener::onFailure), 2);

final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
executor.execute(ActionRunnable.wrap(groupedListener, l -> {
executor.execute(ActionRunnable.supply(groupedListener, () -> {
List<String> deletedBlobs = cleanupStaleRootFiles(staleRootBlobs(newRepoData, rootBlobs.keySet()));
l.onResponse(
new DeleteResult(deletedBlobs.size(), deletedBlobs.stream().mapToLong(name -> rootBlobs.get(name).length()).sum()));
return new DeleteResult(deletedBlobs.size(), deletedBlobs.stream().mapToLong(name -> rootBlobs.get(name).length()).sum());
}));

final Set<String> survivingIndexIds = newRepoData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet());
executor.execute(ActionRunnable.wrap(groupedListener, l -> l.onResponse(cleanupStaleIndices(foundIndices, survivingIndexIds))));
executor.execute(ActionRunnable.supply(groupedListener, () -> cleanupStaleIndices(foundIndices, survivingIndexIds)));
}

/**

@ -712,26 +711,22 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// that decrements the generation it points at

// Write Global MetaData
executor.execute(ActionRunnable.wrap(allMetaListener, l -> {
globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID(), false);
l.onResponse(null);
}));
executor.execute(ActionRunnable.run(allMetaListener,
() -> globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID(), false)));

// write the index metadata for each index in the snapshot
for (IndexId index : indices) {
executor.execute(ActionRunnable.wrap(allMetaListener, l -> {
indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID(), false);
l.onResponse(null);
}));
executor.execute(ActionRunnable.run(allMetaListener, () ->
indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID(), false)));
}

executor.execute(ActionRunnable.wrap(allMetaListener, afterMetaListener -> {
executor.execute(ActionRunnable.supply(allMetaListener, () -> {
final SnapshotInfo snapshotInfo = new SnapshotInfo(snapshotId,
indices.stream().map(IndexId::getName).collect(Collectors.toList()),
startTime, failure, threadPool.absoluteTimeInMillis(), totalShards, shardFailures,
includeGlobalState, userMetadata);
snapshotFormat.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), false);
afterMetaListener.onResponse(snapshotInfo);
return snapshotInfo;
}));
}
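[Editor's note - not part of the commit: several hunks above and below replace ActionRunnable.wrap(listener, l -> { ...; l.onResponse(...); }) with the terser ActionRunnable.run/ActionRunnable.supply helpers. A rough, self-contained sketch of that pattern follows; the names Listener, CheckedSupplier, CheckedRunnable and ListenerRunnables are illustrative stand-ins, not the real Elasticsearch types, which also handle rejection and build on AbstractRunnable:]

// Sketch only: route a computation's result or exception to a listener.
final class ListenerRunnables {
    interface Listener<T> {
        void onResponse(T result);
        void onFailure(Exception e);
    }

    interface CheckedSupplier<T> {
        T get() throws Exception;
    }

    interface CheckedRunnable {
        void run() throws Exception;
    }

    // supply(...): run a computation and hand its value (or failure) to the listener
    static <T> Runnable supply(Listener<T> listener, CheckedSupplier<T> supplier) {
        return () -> {
            try {
                listener.onResponse(supplier.get());
            } catch (Exception e) {
                listener.onFailure(e);
            }
        };
    }

    // run(...): run a side-effecting action and complete the listener with null
    static Runnable run(Listener<Void> listener, CheckedRunnable action) {
        return supply(listener, () -> {
            action.run();
            return null;
        });
    }
}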
@ -341,7 +341,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}

private <T> void runAsync(long id, Supplier<T> executable, ActionListener<T> listener) {
getExecutor(id).execute(ActionRunnable.wrap(listener, l -> l.onResponse(executable.get())));
getExecutor(id).execute(ActionRunnable.supply(listener, executable::get));
}

private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws Exception {

@ -1053,7 +1053,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
Executor executor = getExecutor(shard);
ActionListener<Rewriteable> actionListener = ActionListener.wrap(r ->
// now we need to check if there is a pending refresh and register
shard.awaitShardSearchActive(b -> executor.execute(ActionRunnable.wrap(listener, l -> l.onResponse(request)))),
shard.awaitShardSearchActive(b -> executor.execute(ActionRunnable.supply(listener, () -> request))),
listener::onFailure);
// we also do rewrite on the coordinating node (TransportSearchService) but we also need to do it here for BWC as well as
// AliasFilters that might need to be rewritten. These are edge-cases but we are very efficient doing the rewrite here so it's not
@ -45,7 +45,7 @@ public final class InnerHitsFetchSubPhase implements FetchSubPhase {

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
if (context.innerHits().isEmpty()) {
if ((context.innerHits() != null && context.innerHits().size() > 0) == false) {
return;
}

@ -72,6 +72,10 @@ public final class InnerHitsFetchSubPhase implements FetchSubPhase {
}
innerHits.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
innerHits.setUid(new Uid(hit.getType(), hit.getId()));
innerHits.lookup().source().setSource(context.lookup().source().internalSourceRef());
if (context.lookup().source().source() != null) {
innerHits.lookup().source().setSource(context.lookup().source().source());
}
fetchPhase.execute(innerHits);
FetchSearchResult fetchResult = innerHits.fetchResult();
SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits();
@ -37,11 +37,11 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.Field;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.FieldOptions;
import org.elasticsearch.search.internal.SearchContext;

import java.text.BreakIterator;
import java.util.Collections;

@ -69,7 +69,7 @@ public class FastVectorHighlighter implements Highlighter {
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
SearchContextHighlight.Field field = highlighterContext.field;
SearchContext context = highlighterContext.context;
QueryShardContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
MappedFieldType fieldType = highlighterContext.fieldType;

@ -93,7 +93,7 @@ public class FastVectorHighlighter implements Highlighter {
BaseFragmentsBuilder fragmentsBuilder;

final BoundaryScanner boundaryScanner = getBoundaryScanner(field);
boolean forceSource = context.highlight().forceSource(field);
boolean forceSource = highlighterContext.highlight.forceSource(field);
if (field.fieldOptions().numberOfFragments() == 0) {
fragListBuilder = new SingleFragListBuilder();

@ -203,7 +203,7 @@ public class FastVectorHighlighter implements Highlighter {
return null;

} catch (Exception e) {
throw new FetchPhaseExecutionException(context.shardTarget(),
throw new FetchPhaseExecutionException(highlighterContext.shardTarget,
"Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
}
@ -25,6 +25,8 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;

@ -45,17 +47,25 @@ public class HighlightPhase implements FetchSubPhase {
if (context.highlight() == null) {
return;
}
hitExecute(context.shardTarget(), context.getQueryShardContext(), context.parsedQuery().query(), context.highlight(), hitContext);
}

public void hitExecute(SearchShardTarget shardTarget,
QueryShardContext context,
Query query,
SearchContextHighlight highlight,
HitContext hitContext) {
Map<String, HighlightField> highlightFields = new HashMap<>();
for (SearchContextHighlight.Field field : context.highlight().fields()) {
for (SearchContextHighlight.Field field : highlight.fields()) {
Collection<String> fieldNamesToHighlight;
if (Regex.isSimpleMatchPattern(field.field())) {
fieldNamesToHighlight = context.mapperService().simpleMatchToFullName(field.field());
fieldNamesToHighlight = context.getMapperService().simpleMatchToFullName(field.field());
} else {
fieldNamesToHighlight = Collections.singletonList(field.field());
}

if (context.highlight().forceSource(field)) {
SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().getType()).sourceMapper();
if (highlight.forceSource(field)) {
SourceFieldMapper sourceFieldMapper = context.getMapperService().documentMapper(hitContext.hit().getType()).sourceMapper();
if (!sourceFieldMapper.enabled()) {
throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight
+ " but type [" + hitContext.hit().getType() + "] has disabled _source");

@ -64,7 +74,7 @@ public class HighlightPhase implements FetchSubPhase {

boolean fieldNameContainsWildcards = field.field().contains("*");
for (String fieldName : fieldNamesToHighlight) {
MappedFieldType fieldType = context.mapperService().fullName(fieldName);
MappedFieldType fieldType = context.getMapperService().fullName(fieldName);
if (fieldType == null) {
continue;
}

@ -95,10 +105,10 @@ public class HighlightPhase implements FetchSubPhase {

Query highlightQuery = field.fieldOptions().highlightQuery();
if (highlightQuery == null) {
highlightQuery = context.parsedQuery().query();
highlightQuery = query;
}
HighlighterContext highlighterContext = new HighlighterContext(fieldType.name(),
field, fieldType, context, hitContext, highlightQuery);
field, fieldType, shardTarget, context, highlight, hitContext, highlightQuery);

if ((highlighter.canHighlight(fieldType) == false) && fieldNameContainsWildcards) {
// if several fieldnames matched the wildcard then we want to skip those that we cannot highlight
@ -23,8 +23,8 @@ import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

import java.io.IOException;

@ -46,14 +46,13 @@ public final class HighlightUtils {
/**
* Load field values for highlighting.
*/
public static List<Object> loadFieldValues(SearchContextHighlight.Field field,
MappedFieldType fieldType,
SearchContext searchContext,
FetchSubPhase.HitContext hitContext) throws IOException {
public static List<Object> loadFieldValues(MappedFieldType fieldType,
QueryShardContext context,
FetchSubPhase.HitContext hitContext,
boolean forceSource) throws IOException {
//percolator needs to always load from source, thus it sets the global force source to true
boolean forceSource = searchContext.highlight().forceSource(field);
List<Object> textsToHighlight;
if (!forceSource && fieldType.stored()) {
if (forceSource == false && fieldType.stored()) {
CustomFieldsVisitor fieldVisitor = new CustomFieldsVisitor(singleton(fieldType.name()), false);
hitContext.reader().document(hitContext.docId(), fieldVisitor);
textsToHighlight = fieldVisitor.fields().get(fieldType.name());

@ -62,7 +61,7 @@ public final class HighlightUtils {
textsToHighlight = Collections.emptyList();
}
} else {
SourceLookup sourceLookup = searchContext.lookup().source();
SourceLookup sourceLookup = context.lookup().source();
sourceLookup.setSegmentAndDocument(hitContext.readerContext(), hitContext.docId());
textsToHighlight = sourceLookup.extractRawValues(fieldType.name());
}
@ -20,28 +20,35 @@ package org.elasticsearch.search.fetch.subphase.highlight;

import org.apache.lucene.search.Query;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;

public class HighlighterContext {

public final String fieldName;
public final SearchContextHighlight.Field field;
public final MappedFieldType fieldType;
public final SearchContext context;
public final SearchShardTarget shardTarget;
public final QueryShardContext context;
public final SearchContextHighlight highlight;
public final FetchSubPhase.HitContext hitContext;
public final Query query;

public HighlighterContext(String fieldName,
SearchContextHighlight.Field field,
MappedFieldType fieldType,
SearchContext context,
SearchShardTarget shardTarget,
QueryShardContext context,
SearchContextHighlight highlight,
FetchSubPhase.HitContext hitContext,
Query query) {
this.fieldName = fieldName;
this.field = field;
this.fieldType = fieldType;
this.shardTarget = shardTarget;
this.context = context;
this.highlight = highlight;
this.hitContext = hitContext;
this.query = query;
}
@ -37,9 +37,9 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.ArrayList;

@ -56,7 +56,7 @@ public class PlainHighlighter implements Highlighter {
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
SearchContextHighlight.Field field = highlighterContext.field;
SearchContext context = highlighterContext.context;
QueryShardContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
MappedFieldType fieldType = highlighterContext.fieldType;

@ -101,18 +101,19 @@ public class PlainHighlighter implements Highlighter {
int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 1 : field.fieldOptions().numberOfFragments();
ArrayList<TextFragment> fragsList = new ArrayList<>();
List<Object> textsToHighlight;
Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset();
Analyzer analyzer = context.getMapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset();

try {
textsToHighlight = HighlightUtils.loadFieldValues(field, fieldType, context, hitContext);
textsToHighlight = HighlightUtils.loadFieldValues(fieldType, context, hitContext,
highlighterContext.highlight.forceSource(field));

for (Object textToHighlight : textsToHighlight) {
String text = convertFieldValue(fieldType, textToHighlight);
if (text.length() > maxAnalyzedOffset) {
throw new IllegalArgumentException(
"The length of [" + highlighterContext.fieldName + "] field of [" + hitContext.hit().getId() +
"] doc of [" + context.indexShard().shardId().getIndexName() + "] index " +
"] doc of [" + context.index().getName() + "] index " +
"has exceeded [" + maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
"] index level setting. " + "For large texts, indexing with offsets or term vectors, and highlighting " +

@ -139,7 +140,7 @@ public class PlainHighlighter implements Highlighter {
// the plain highlighter will parse the source and try to analyze it.
return null;
} else {
throw new FetchPhaseExecutionException(context.shardTarget(),
throw new FetchPhaseExecutionException(highlighterContext.shardTarget,
"Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
}

@ -179,7 +180,7 @@ public class PlainHighlighter implements Highlighter {
try {
end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, fieldType.name(), fieldContents);
} catch (Exception e) {
throw new FetchPhaseExecutionException(context.shardTarget(),
throw new FetchPhaseExecutionException(highlighterContext.shardTarget,
"Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
if (end > 0) {
@ -27,7 +27,7 @@ import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
import org.apache.lucene.search.vectorhighlight.ScoreOrderFragmentsBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.lookup.SourceLookup;

import java.io.IOException;

@ -37,22 +37,22 @@ public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder

private final MappedFieldType fieldType;

private final SearchContext searchContext;
private final QueryShardContext context;

public SourceScoreOrderFragmentsBuilder(MappedFieldType fieldType,
SearchContext searchContext,
QueryShardContext context,
String[] preTags,
String[] postTags,
BoundaryScanner boundaryScanner) {
super(preTags, postTags, boundaryScanner);
this.fieldType = fieldType;
this.searchContext = searchContext;
this.context = context;
}

@Override
protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
// we know its low level reader, and matching docId, since that's how we call the highlighter with
SourceLookup sourceLookup = searchContext.lookup().source();
SourceLookup sourceLookup = context.lookup().source();
sourceLookup.setSegmentAndDocument((LeafReaderContext) reader.getContext(), docId);

List<Object> values = sourceLookup.extractRawValues(fieldType.name());
@ -24,7 +24,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.lookup.SourceLookup;

import java.io.IOException;

@ -32,15 +32,15 @@ import java.util.List;

public class SourceSimpleFragmentsBuilder extends SimpleFragmentsBuilder {

private final SearchContext searchContext;
private final QueryShardContext context;

public SourceSimpleFragmentsBuilder(MappedFieldType fieldType,
SearchContext searchContext,
QueryShardContext context,
String[] preTags,
String[] postTags,
BoundaryScanner boundaryScanner) {
super(fieldType, preTags, postTags, boundaryScanner);
this.searchContext = searchContext;
this.context = context;
}

public static final Field[] EMPTY_FIELDS = new Field[0];

@ -48,7 +48,7 @@ public class SourceSimpleFragmentsBuilder extends SimpleFragmentsBuilder {
@Override
protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
// we know its low level reader, and matching docId, since that's how we call the highlighter with
SourceLookup sourceLookup = searchContext.lookup().source();
SourceLookup sourceLookup = context.lookup().source();
sourceLookup.setSegmentAndDocument((LeafReaderContext) reader.getContext(), docId);

List<Object> values = sourceLookup.extractRawValues(fieldType.name());
@ -37,10 +37,10 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.text.BreakIterator;

@ -61,18 +61,19 @@ public class UnifiedHighlighter implements Highlighter {
public HighlightField highlight(HighlighterContext highlighterContext) {
MappedFieldType fieldType = highlighterContext.fieldType;
SearchContextHighlight.Field field = highlighterContext.field;
SearchContext context = highlighterContext.context;
QueryShardContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset();
final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset();

List<Snippet> snippets = new ArrayList<>();
int numberOfFragments;
try {

final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()),
final Analyzer analyzer = getAnalyzer(context.getMapperService().documentMapper(hitContext.hit().getType()),
hitContext);
List<Object> fieldValues = loadFieldValues(fieldType, field, context, hitContext);
List<Object> fieldValues = loadFieldValues(fieldType, field, context, hitContext,
highlighterContext.highlight.forceSource(field));
if (fieldValues.size() == 0) {
return null;
}

@ -84,7 +85,7 @@ public class UnifiedHighlighter implements Highlighter {
if ((offsetSource == OffsetSource.ANALYSIS) && (fieldValue.length() > maxAnalyzedOffset)) {
throw new IllegalArgumentException(
"The length of [" + highlighterContext.fieldName + "] field of [" + hitContext.hit().getId() +
"] doc of [" + context.indexShard().shardId().getIndexName() + "] index " + "has exceeded [" +
"] doc of [" + context.index().getName() + "] index " + "has exceeded [" +
maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
"] index level setting. " + "For large texts, indexing with offsets or term vectors is recommended!");

@ -123,7 +124,7 @@ public class UnifiedHighlighter implements Highlighter {
}
}
} catch (IOException e) {
throw new FetchPhaseExecutionException(context.shardTarget(),
throw new FetchPhaseExecutionException(highlighterContext.shardTarget,
"Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}

@ -154,9 +155,12 @@ public class UnifiedHighlighter implements Highlighter {
return docMapper.mappers().indexAnalyzer();
}

protected List<Object> loadFieldValues(MappedFieldType fieldType, SearchContextHighlight.Field field, SearchContext context,
FetchSubPhase.HitContext hitContext) throws IOException {
List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldType, context, hitContext);
protected List<Object> loadFieldValues(MappedFieldType fieldType,
SearchContextHighlight.Field field,
QueryShardContext context,
FetchSubPhase.HitContext hitContext,
boolean forceSource) throws IOException {
List<Object> fieldValues = HighlightUtils.loadFieldValues(fieldType, context, hitContext, forceSource);
fieldValues = fieldValues.stream()
.map((s) -> convertFieldValue(fieldType, s))
.collect(Collectors.toList());
@ -31,6 +31,7 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreContext;
import org.elasticsearch.search.sort.SortAndFormats;

@ -381,4 +382,9 @@ public class SubSearchContext extends FilteredSearchContext {
public void innerHits(Map<String, InnerHitContextBuilder> innerHits) {
this.innerHits = innerHits;
}

@Override
public SearchLookup lookup() {
return queryShardContext.lookup();
}
}
@ -163,7 +163,7 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
new ArraySearchPhaseResults<>(10), null, false, new AtomicLong());
String scrollId = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10);
InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty();
SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId);
SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId, action.buildShardFailures());
assertEquals(scrollId, searchResponse.getScrollId());
assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations());
assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest());

@ -179,7 +179,7 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
new IllegalArgumentException());
String scrollId = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10);
InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty();
SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId);
SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId, action.buildShardFailures());
assertEquals(scrollId, searchResponse.getScrollId());
assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations());
assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest());

@ -187,7 +187,7 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
assertSame(searchResponse.getHits(), internalSearchResponse.hits());
}

public void testBuildSearchResponseDisallowPartialFailures() {
public void testSendSearchResponseDisallowPartialFailures() {
SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false);
AtomicReference<Exception> exception = new AtomicReference<>();
ActionListener<SearchResponse> listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set);

@ -203,7 +203,7 @@ public class AbstractSearchAsyncActionTests extends ESTestCase {
action.onShardFailure(i, new SearchShardTarget(failureNodeId, failureShardId, failureClusterAlias, OriginalIndices.NONE),
new IllegalArgumentException());
}
action.buildSearchResponse(InternalSearchResponse.empty(), randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10));
action.sendSearchResponse(InternalSearchResponse.empty(), randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10));
assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class));
SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException)exception.get();
assertEquals(0, searchPhaseExecutionException.getSuppressed().length);
@ -148,7 +148,7 @@ public class SearchAsyncActionTests extends ESTestCase {
asyncAction.start();
latch.await();
assertTrue(searchPhaseDidRun.get());
SearchResponse searchResponse = asyncAction.buildSearchResponse(null, null);
SearchResponse searchResponse = asyncAction.buildSearchResponse(null, null, asyncAction.buildShardFailures());
assertEquals(shardsIter.size() - numSkipped, numRequests.get());
assertEquals(0, searchResponse.getFailedShards());
assertEquals(numSkipped, searchResponse.getSkippedShards());
@ -22,6 +22,7 @@ package org.elasticsearch.common.geo;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.geo.GeometryTestUtils;
import org.elasticsearch.geometry.Circle;
import org.elasticsearch.geometry.Geometry;
import org.elasticsearch.geometry.GeometryCollection;

@ -32,7 +33,6 @@ import org.elasticsearch.geometry.MultiPoint;
import org.elasticsearch.geometry.MultiPolygon;
import org.elasticsearch.geometry.Point;
import org.elasticsearch.geometry.Polygon;
import org.elasticsearch.geometry.utils.WellKnownText;
import org.elasticsearch.index.mapper.GeoShapeIndexer;
import org.elasticsearch.test.ESTestCase;

@ -41,12 +41,11 @@ import java.text.ParseException;
import java.util.Arrays;
import java.util.Collections;

import static org.hamcrest.Matchers.instanceOf;

public class GeometryIndexerTests extends ESTestCase {

GeoShapeIndexer indexer = new GeoShapeIndexer(true, "test");
private static final WellKnownText WKT = new WellKnownText(true, geometry -> {
});

public void testCircle() {
UnsupportedOperationException ex =

@ -105,10 +104,96 @@ public class GeometryIndexerTests extends ESTestCase {
new Line(new double[]{160, 180}, new double[]{0, 5}),
new Line(new double[]{-180, -160, -180}, new double[]{5, 10, 15}),
new Line(new double[]{180, 160}, new double[]{15, 20})
)
);
));

assertEquals(indexed, indexer.prepareForIndexing(line));

line = new Line(new double[]{0, 720}, new double[]{0, 20});
indexed = new MultiLine(Arrays.asList(
new Line(new double[]{0, 180}, new double[]{0, 5}),
new Line(new double[]{-180, 180}, new double[]{5, 15}),
new Line(new double[]{-180, 0}, new double[]{15, 20})
));

assertEquals(indexed, indexer.prepareForIndexing(line));

line = new Line(new double[]{160, 180, 180, 200, 160, 140}, new double[]{0, 10, 20, 30, 30, 40});
indexed = new MultiLine(Arrays.asList(
new Line(new double[]{160, 180}, new double[]{0, 10}),
new Line(new double[]{-180, -180, -160, -180}, new double[]{10, 20, 30, 30}),
new Line(new double[]{180, 160, 140}, new double[]{30, 30, 40})
));

assertEquals(indexed, indexer.prepareForIndexing(line));

line = new Line(new double[]{-70, 180, 900}, new double[]{0, 0, 4});

indexed = new MultiLine(Arrays.asList(
new Line(new double[]{-70, 180}, new double[]{0, 0}),
new Line(new double[]{-180, 180}, new double[]{0, 2}),
new Line(new double[]{-180, 180}, new double[]{2, 4})
));

assertEquals(indexed, indexer.prepareForIndexing(line));

line = new Line(new double[]{160, 200, 160, 200, 160, 200}, new double[]{0, 10, 20, 30, 40, 50});

indexed = new MultiLine(Arrays.asList(
new Line(new double[]{160, 180}, new double[]{0, 5}),
new Line(new double[]{-180, -160, -180}, new double[]{5, 10, 15}),
new Line(new double[]{180, 160, 180}, new double[]{15, 20, 25}),
new Line(new double[]{-180, -160, -180}, new double[]{25, 30, 35}),
new Line(new double[]{180, 160, 180}, new double[]{35, 40, 45}),
new Line(new double[]{-180, -160}, new double[]{45, 50})
));

assertEquals(indexed, indexer.prepareForIndexing(line));
}

/**
* Returns the sum of Euclidean distances between consecutive points in the linestring.
*/
public double length(Line line) {
double distance = 0;
for (int i = 1; i < line.length(); i++) {
distance += Math.sqrt((line.getLat(i) - line.getLat(i - 1)) * (line.getLat(i) - line.getLat(i - 1)) +
(line.getLon(i) - line.getLon(i - 1)) * (line.getLon(i) - line.getLon(i - 1)));
}
return distance;
}

/**
* A simple test that generates random lines crossing the anti-meridian and checks that the decomposed segments of each line
* have the same total length (measured using Euclidean distances between neighboring points) as the original line.
*/
public void testRandomLine() {
int size = randomIntBetween(2, 20);
int shift = randomIntBetween(-2, 2);
double[] originalLats = new double[size];
double[] originalLons = new double[size];

for (int i = 0; i < size; i++) {
originalLats[i] = GeometryTestUtils.randomLat();
originalLons[i] = GeometryTestUtils.randomLon() + shift * 360;
if (randomInt(3) == 0) {
shift += randomFrom(-2, -1, 1, 2);
}
}
Line original = new Line(originalLons, originalLats);

Geometry decomposed = indexer.prepareForIndexing(original);
double decomposedLength = 0;
if (decomposed instanceof Line) {
decomposedLength = length((Line) decomposed);
} else {
assertThat(decomposed, instanceOf(MultiLine.class));
MultiLine lines = (MultiLine) decomposed;
for (int i = 0; i < lines.size(); i++) {
decomposedLength += length(lines.get(i));
}
}

assertEquals("Different Lengths between " + original + " and " + decomposed, length(original), decomposedLength, 0.001);
}

public void testMultiLine() {

@ -137,6 +222,15 @@ public class GeometryIndexerTests extends ESTestCase {

point = new Point(2, 1, 3);
assertEquals(indexed, indexer.prepareForIndexing(point));

point = new Point(362, 1);
assertEquals(indexed, indexer.prepareForIndexing(point));

point = new Point(-178, 179);
assertEquals(indexed, indexer.prepareForIndexing(point));

point = new Point(180, 180);
assertEquals(new Point(0, 0), indexer.prepareForIndexing(point));
}

public void testMultiPoint() {

@ -254,5 +348,4 @@ public class GeometryIndexerTests extends ESTestCase {
return geometryParser.parse(parser);
}
}

}
@ -20,14 +20,16 @@
package org.elasticsearch.index.reindex;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.elasticsearch.index.reindex.BulkByScrollTask.Status;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
import java.util.HashMap;

@ -38,12 +40,12 @@ import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.hamcrest.Matchers.containsString;

public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkByScrollResponse> {

private boolean includeUpdated;
private boolean includeCreated;
private boolean testExceptions = randomBoolean();

public void testRountTrip() throws IOException {
BulkByScrollResponse response = new BulkByScrollResponse(timeValueMillis(randomNonNegativeLong()),

@ -76,7 +78,9 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkBySc
shardId = randomInt();
nodeId = usually() ? randomAlphaOfLength(5) : null;
}
return singletonList(new ScrollableHitSource.SearchFailure(new ElasticsearchException("foo"), index, shardId, nodeId));
ElasticsearchException exception = randomFrom(new ResourceNotFoundException("bar"), new ElasticsearchException("foo"),
new NoNodeAvailableException("baz"));
return singletonList(new ScrollableHitSource.SearchFailure(exception, index, shardId, nodeId));
}

private void assertResponseEquals(BulkByScrollResponse expected, BulkByScrollResponse actual) {

@ -101,14 +105,14 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkBySc
assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId());
assertEquals(expectedFailure.getReason().getClass(), actualFailure.getReason().getClass());
assertEquals(expectedFailure.getReason().getMessage(), actualFailure.getReason().getMessage());
assertEquals(expectedFailure.getStatus(), actualFailure.getStatus());
}
}

public static void assertEqualBulkResponse(BulkByScrollResponse expected, BulkByScrollResponse actual,
boolean includeUpdated, boolean includeCreated) {
public static void assertEqualBulkResponse(BulkByScrollResponse expected, BulkByScrollResponse actual, boolean includeUpdated,
boolean includeCreated) {
assertEquals(expected.getTook(), actual.getTook());
BulkByScrollTaskStatusTests
.assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated);
BulkByScrollTaskStatusTests.assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated);
assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size());
for (int i = 0; i < expected.getBulkFailures().size(); i++) {
Failure expectedFailure = expected.getBulkFailures().get(i);

@ -116,7 +120,6 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkBySc
assertEquals(expectedFailure.getIndex(), actualFailure.getIndex());
assertEquals(expectedFailure.getType(), actualFailure.getType());
assertEquals(expectedFailure.getId(), actualFailure.getId());
assertThat(expectedFailure.getMessage(), containsString(actualFailure.getMessage()));
assertEquals(expectedFailure.getStatus(), actualFailure.getStatus());
}
assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size());

@ -126,7 +129,7 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkBySc
assertEquals(expectedFailure.getIndex(), actualFailure.getIndex());
assertEquals(expectedFailure.getShardId(), actualFailure.getShardId());
assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId());
assertThat(expectedFailure.getReason().getMessage(), containsString(actualFailure.getReason().getMessage()));
assertEquals(expectedFailure.getStatus(), actualFailure.getStatus());
}
}

@ -137,12 +140,13 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkBySc

@Override
protected BulkByScrollResponse createTestInstance() {
// failures are tested separately, so we can test XContent equivalence at least when we have no failures
return
new BulkByScrollResponse(
timeValueMillis(randomNonNegativeLong()), BulkByScrollTaskStatusTests.randomStatusWithoutException(),
emptyList(), emptyList(), randomBoolean()
);
if (testExceptions) {
return new BulkByScrollResponse(timeValueMillis(randomNonNegativeLong()), BulkByScrollTaskStatusTests.randomStatus(),
randomIndexingFailures(), randomSearchFailures(), randomBoolean());
} else {
return new BulkByScrollResponse(timeValueMillis(randomNonNegativeLong()),
BulkByScrollTaskStatusTests.randomStatusWithoutException(), emptyList(), emptyList(), randomBoolean());
}
}

@Override

@ -150,6 +154,12 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkBySc
return BulkByScrollResponse.fromXContent(parser);
}

@Override
protected boolean assertToXContentEquivalence() {
// XContentEquivalence fails in the exception case, due to how exceptions are serialized.
return testExceptions == false;
}

@Override
protected boolean supportsUnknownFields() {
return true;
@ -68,3 +68,7 @@ test {
task integTest(type: Test) {
include "**/*IT.class"
}

tasks.register("verifyVersions") {
dependsOn test
}
@ -81,13 +81,7 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT
|
|||
private void deleteAndAssertEmpty(BlobPath path) throws Exception {
|
||||
final BlobStoreRepository repo = getRepository();
|
||||
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
|
||||
repo.threadPool().generic().execute(new ActionRunnable<Void>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
repo.blobStore().blobContainer(path).delete();
|
||||
future.onResponse(null);
|
||||
}
|
||||
});
|
||||
repo.threadPool().generic().execute(ActionRunnable.run(future, () -> repo.blobStore().blobContainer(path).delete()));
|
||||
future.actionGet();
|
||||
final BlobPath parent = path.parent();
|
||||
if (parent == null) {
|
||||
|
@ -146,9 +140,7 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT
|
|||
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
|
||||
final Executor genericExec = repo.threadPool().generic();
|
||||
final int testBlobLen = randomIntBetween(1, 100);
|
||||
genericExec.execute(new ActionRunnable<Void>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
genericExec.execute(ActionRunnable.run(future, () -> {
|
||||
final BlobStore blobStore = repo.blobStore();
|
||||
blobStore.blobContainer(repo.basePath().add("foo"))
|
||||
.writeBlob("nested-blob", new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), testBlobLen, false);
|
||||
|
@ -156,9 +148,7 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT
|
|||
.writeBlob("bar", new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), testBlobLen, false);
|
||||
blobStore.blobContainer(repo.basePath().add("foo").add("nested2"))
|
||||
.writeBlob("blub", new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), testBlobLen, false);
|
||||
future.onResponse(null);
|
||||
}
|
||||
});
|
||||
}));
|
||||
future.actionGet();
|
||||
assertChildren(repo.basePath(), Collections.singleton("foo"));
|
||||
assertBlobsByPrefix(repo.basePath(), "fo", Collections.emptyMap());
|
||||
|
@ -243,37 +233,27 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT
|
|||
|
||||
private void createDanglingIndex(final BlobStoreRepository repo, final Executor genericExec) throws Exception {
|
||||
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
|
||||
genericExec.execute(new ActionRunnable<Void>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
genericExec.execute(ActionRunnable.run(future, () -> {
|
||||
final BlobStore blobStore = repo.blobStore();
|
||||
blobStore.blobContainer(repo.basePath().add("indices").add("foo"))
|
||||
.writeBlob("bar", new ByteArrayInputStream(new byte[3]), 3, false);
|
||||
for (String prefix : Arrays.asList("snap-", "meta-")) {
|
||||
blobStore.blobContainer(repo.basePath())
|
||||
.writeBlob(prefix + "foo.dat", new ByteArrayInputStream(new byte[3]), 3, false);
|
||||
blobStore.blobContainer(repo.basePath()).writeBlob(prefix + "foo.dat", new ByteArrayInputStream(new byte[3]), 3, false);
|
||||
}
|
||||
future.onResponse(null);
|
||||
}
|
||||
});
|
||||
}));
|
||||
future.actionGet();
|
||||
assertTrue(assertCorruptionVisible(repo, genericExec));
|
||||
}
|
||||
|
||||
protected boolean assertCorruptionVisible(BlobStoreRepository repo, Executor executor) throws Exception {
|
||||
final PlainActionFuture<Boolean> future = PlainActionFuture.newFuture();
|
||||
executor.execute(new ActionRunnable<Boolean>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
executor.execute(ActionRunnable.supply(future, () -> {
|
||||
final BlobStore blobStore = repo.blobStore();
|
||||
future.onResponse(
|
||||
blobStore.blobContainer(repo.basePath().add("indices")).children().containsKey("foo")
|
||||
return blobStore.blobContainer(repo.basePath().add("indices")).children().containsKey("foo")
|
||||
&& BlobStoreTestUtil.blobExists(blobStore.blobContainer(repo.basePath().add("indices").add("foo")), "bar")
|
||||
&& BlobStoreTestUtil.blobExists(blobStore.blobContainer(repo.basePath()), "meta-foo.dat")
|
||||
&& BlobStoreTestUtil.blobExists(blobStore.blobContainer(repo.basePath()), "snap-foo.dat")
|
||||
);
|
||||
}
|
||||
});
|
||||
&& BlobStoreTestUtil.blobExists(blobStore.blobContainer(repo.basePath()), "snap-foo.dat");
|
||||
}));
|
||||
return future.actionGet();
|
||||
}
|
||||
|
||||
|
@ -298,13 +278,8 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT
|
|||
private Set<String> listChildren(BlobPath path) {
|
||||
final PlainActionFuture<Set<String>> future = PlainActionFuture.newFuture();
|
||||
final BlobStoreRepository repository = getRepository();
|
||||
repository.threadPool().generic().execute(new ActionRunnable<Set<String>>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
final BlobStore blobStore = repository.blobStore();
|
||||
future.onResponse(blobStore.blobContainer(path).children().keySet());
|
||||
}
|
||||
});
|
||||
repository.threadPool().generic().execute(
|
||||
ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).children().keySet()));
|
||||
return future.actionGet();
|
||||
}
|
||||
|
||||
|
|
|
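The hunks above (and again in the BlobStoreTestUtil and LDAP hunks further down) repeatedly collapse anonymous ActionRunnable subclasses into the ActionRunnable.run and ActionRunnable.supply factories. A minimal sketch of the before/after shapes, assuming the Elasticsearch test classpath already used in these files (the types and method names are the ones visible in the diff itself):

// Sketch only: contrasts the removed anonymous-class form with the added factory form.
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.PlainActionFuture;

import java.util.concurrent.Executor;

class ActionRunnableRefactorSketch {

    // Before: an anonymous subclass whose doRun() must remember to complete the listener itself.
    static String before(Executor executor) {
        final PlainActionFuture<String> future = PlainActionFuture.newFuture();
        executor.execute(new ActionRunnable<String>(future) {
            @Override
            protected void doRun() {
                future.onResponse("done");
            }
        });
        return future.actionGet();
    }

    // After: the factory completes the listener from the lambda's result (supply) or after the
    // side effect finishes (run), and failures are routed to the listener automatically.
    static String after(Executor executor) {
        final PlainActionFuture<String> future = PlainActionFuture.newFuture();
        executor.execute(ActionRunnable.supply(future, () -> "done"));
        return future.actionGet();
    }
}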
@ -89,9 +89,7 @@ public final class BlobStoreTestUtil {
|
|||
*/
|
||||
public static void assertConsistency(BlobStoreRepository repository, Executor executor) {
|
||||
final PlainActionFuture<Void> listener = PlainActionFuture.newFuture();
|
||||
executor.execute(new ActionRunnable<Void>(listener) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
executor.execute(ActionRunnable.run(listener, () -> {
|
||||
final BlobContainer blobContainer = repository.blobContainer();
|
||||
final long latestGen;
|
||||
try (DataInputStream inputStream = new DataInputStream(blobContainer.readBlob("index.latest"))) {
|
||||
|
@ -108,9 +106,7 @@ public final class BlobStoreTestUtil {
|
|||
}
|
||||
assertIndexUUIDs(blobContainer, repositoryData);
|
||||
assertSnapshotUUIDs(repository, repositoryData);
|
||||
listener.onResponse(null);
|
||||
}
|
||||
});
|
||||
}));
|
||||
listener.actionGet(TimeValue.timeValueMinutes(1L));
|
||||
}
|
||||
|
||||
|
@ -186,9 +182,7 @@ public final class BlobStoreTestUtil {
|
|||
throws InterruptedException, ExecutionException {
|
||||
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
|
||||
final AtomicLong totalSize = new AtomicLong();
|
||||
repository.threadPool().generic().execute(new ActionRunnable<Void>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
repository.threadPool().generic().execute(ActionRunnable.run(future, () -> {
|
||||
final BlobStore blobStore = repository.blobStore();
|
||||
BlobContainer container =
|
||||
blobStore.blobContainer(repository.basePath().add("indices").add(name));
|
||||
|
@ -197,49 +191,37 @@ public final class BlobStoreTestUtil {
|
|||
totalSize.addAndGet(size);
|
||||
container.writeBlob(file, new ByteArrayInputStream(new byte[size]), size, false);
|
||||
}
|
||||
future.onResponse(null);
|
||||
}
|
||||
});
|
||||
}));
|
||||
future.get();
|
||||
return totalSize.get();
|
||||
}
|
||||
|
||||
public static void assertCorruptionVisible(BlobStoreRepository repository, Map<String, Set<String>> indexToFiles) {
|
||||
final PlainActionFuture<Boolean> future = PlainActionFuture.newFuture();
|
||||
repository.threadPool().generic().execute(new ActionRunnable<Boolean>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
repository.threadPool().generic().execute(ActionRunnable.supply(future, () -> {
|
||||
final BlobStore blobStore = repository.blobStore();
|
||||
for (String index : indexToFiles.keySet()) {
|
||||
if (blobStore.blobContainer(repository.basePath().add("indices"))
|
||||
.children().containsKey(index) == false) {
|
||||
future.onResponse(false);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
for (String file : indexToFiles.get(index)) {
|
||||
try (InputStream ignored =
|
||||
blobStore.blobContainer(repository.basePath().add("indices").add(index)).readBlob(file)) {
|
||||
} catch (NoSuchFileException e) {
|
||||
future.onResponse(false);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
future.onResponse(true);
|
||||
}
|
||||
});
|
||||
return true;
|
||||
}));
|
||||
assertTrue(future.actionGet());
|
||||
}
|
||||
|
||||
public static void assertBlobsByPrefix(BlobStoreRepository repository, BlobPath path, String prefix, Map<String, BlobMetaData> blobs) {
|
||||
final PlainActionFuture<Map<String, BlobMetaData>> future = PlainActionFuture.newFuture();
|
||||
repository.threadPool().generic().execute(new ActionRunnable<Map<String, BlobMetaData>>(future) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
final BlobStore blobStore = repository.blobStore();
|
||||
future.onResponse(blobStore.blobContainer(path).listBlobsByPrefix(prefix));
|
||||
}
|
||||
});
|
||||
repository.threadPool().generic().execute(
|
||||
ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).listBlobsByPrefix(prefix)));
|
||||
Map<String, BlobMetaData> foundBlobs = future.actionGet();
|
||||
if (blobs.isEmpty()) {
|
||||
assertThat(foundBlobs.keySet(), empty());
|
||||
|
|
|
@ -81,11 +81,13 @@ A successful call returns an object with "cluster" and "index" fields.
|
|||
"manage_security",
|
||||
"manage_slm",
|
||||
"manage_token",
|
||||
"manage_transform",
|
||||
"manage_watcher",
|
||||
"monitor",
|
||||
"monitor_data_frame_transforms",
|
||||
"monitor_ml",
|
||||
"monitor_rollup",
|
||||
"monitor_transform",
|
||||
"monitor_watcher",
|
||||
"none",
|
||||
"read_ccr",
|
||||
|
|
|
@ -153,8 +153,21 @@ action.
|
|||
+
|
||||
--
|
||||
NOTE: This privilege does not restrict the index operation to the creation
|
||||
of documents but instead restricts API use to the index API. The index API allows a user
|
||||
to overwrite a previously indexed document.
|
||||
of documents but instead restricts API use to the index API. The index API
|
||||
allows a user to overwrite a previously indexed document. See the `create_doc`
|
||||
privilege for an alternative.
|
||||
|
||||
--
|
||||
|
||||
`create_doc`::
|
||||
Privilege to index documents. Also grants access to the update mapping action.
|
||||
However, it does not enable a user to update existing documents.
|
||||
+
|
||||
--
|
||||
NOTE: When indexing documents with an external `_id` either via the index API or
|
||||
the bulk API, the request must set `op_type` to `create`. If `_id`s are
|
||||
generated automatically, the authorization happens as if the `op_type` is set to
|
||||
`create`.
|
||||
|
||||
--
|
||||
|
||||
|
|
|
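As context for the `create_doc` note above, a request that this privilege would authorize looks roughly like the following. This is a hedged sketch against the 7.x high-level REST client; the index name, id and document body are illustrative:

// Sketch: indexing with an external _id under `create_doc` requires op_type=create,
// so the request can only create a new document, never overwrite an existing one.
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

class CreateDocSketch {
    static void indexAsCreate(RestHighLevelClient client) throws IOException {
        IndexRequest request = new IndexRequest("my-index")   // illustrative index name
            .id("externally-assigned-id")                     // external _id, so op_type must be create
            .opType(DocWriteRequest.OpType.CREATE)
            .source("{\"message\":\"hello\"}", XContentType.JSON);
        // Fails with a version-conflict error if a document with this _id already exists.
        client.index(request, RequestOptions.DEFAULT);
    }
}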
@ -596,6 +596,9 @@ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements
|
|||
List<String> followedIndexUUIDs) {
|
||||
List<Index> leaderIndicesToFollow = new ArrayList<>();
|
||||
for (IndexMetaData leaderIndexMetaData : remoteClusterState.getMetaData()) {
|
||||
if (leaderIndexMetaData.getState() != IndexMetaData.State.OPEN) {
|
||||
continue;
|
||||
}
|
||||
if (autoFollowPattern.match(leaderIndexMetaData.getIndex().getName())) {
|
||||
IndexRoutingTable indexRoutingTable = remoteClusterState.routingTable().index(leaderIndexMetaData.getIndex());
|
||||
if (indexRoutingTable != null &&
|
||||
|
|
|
@ -5,8 +5,10 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.ccr.action;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
|
@ -18,11 +20,13 @@ import org.elasticsearch.cluster.routing.ShardRouting;
|
|||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
@ -44,10 +48,12 @@ import java.util.HashMap;
|
|||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
@ -57,6 +63,7 @@ import static org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollo
|
|||
import static org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower.recordLeaderIndexAsFollowFunction;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.hasItem;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
@ -416,6 +423,26 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
|
|||
assertThat(result.get(1).getName(), equalTo("index2"));
|
||||
}
|
||||
|
||||
public void testGetLeaderIndicesToFollowWithClosedIndices() {
|
||||
final AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("*"),
|
||||
null, null, null, null, null, null, null, null, null, null, null);
|
||||
|
||||
// index is open
|
||||
ClusterState remoteState = ClusterStateCreationUtils.stateWithActivePrimary("test-index", true, randomIntBetween(1, 3), 0);
|
||||
List<Index> result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, remoteState, Collections.emptyList());
|
||||
assertThat(result.size(), equalTo(1));
|
||||
assertThat(result, hasItem(remoteState.metaData().index("test-index").getIndex()));
|
||||
|
||||
// index is closed
|
||||
remoteState = ClusterState.builder(remoteState)
|
||||
.metaData(MetaData.builder(remoteState.metaData())
|
||||
.put(IndexMetaData.builder(remoteState.metaData().index("test-index")).state(IndexMetaData.State.CLOSE).build(), true)
|
||||
.build())
|
||||
.build();
|
||||
result = AutoFollower.getLeaderIndicesToFollow(autoFollowPattern, remoteState, Collections.emptyList());
|
||||
assertThat(result.size(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testRecordLeaderIndexAsFollowFunction() {
|
||||
AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Collections.emptyMap(),
|
||||
Collections.singletonMap("pattern1", Collections.emptyList()), Collections.emptyMap());
|
||||
|
@ -763,7 +790,9 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
|
|||
autoFollower.start();
|
||||
assertThat(allResults.size(), equalTo(states.length));
|
||||
for (int i = 0; i < states.length; i++) {
|
||||
assertThat(allResults.get(i).autoFollowExecutionResults.containsKey(new Index("logs-" + i, "_na_")), is(true));
|
||||
final String indexName = "logs-" + i;
|
||||
assertThat(allResults.get(i).autoFollowExecutionResults.keySet().stream()
|
||||
.anyMatch(index -> index.getName().equals(indexName)), is(true));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1049,6 +1078,87 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testClosedIndicesAreNotAutoFollowed() {
|
||||
final Client client = mock(Client.class);
|
||||
when(client.getRemoteClusterClient(anyString())).thenReturn(client);
|
||||
|
||||
final String pattern = "pattern1";
|
||||
final ClusterState localState = ClusterState.builder(new ClusterName("local"))
|
||||
.metaData(MetaData.builder()
|
||||
.putCustom(AutoFollowMetadata.TYPE,
|
||||
new AutoFollowMetadata(Collections.singletonMap(pattern,
|
||||
new AutoFollowPattern("remote", Collections.singletonList("docs-*"), null, null, null, null, null, null, null, null,
|
||||
null, null, null)),
|
||||
Collections.singletonMap(pattern, Collections.emptyList()),
|
||||
Collections.singletonMap(pattern, Collections.emptyMap()))))
|
||||
.build();
|
||||
|
||||
ClusterState remoteState = null;
|
||||
final int nbLeaderIndices = randomIntBetween(1, 15);
|
||||
for (int i = 0; i < nbLeaderIndices; i++) {
|
||||
String indexName = "docs-" + i;
|
||||
if (remoteState == null) {
|
||||
remoteState = createRemoteClusterState(indexName, true);
|
||||
} else {
|
||||
remoteState = createRemoteClusterState(remoteState, indexName);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
// randomly close the index
|
||||
remoteState = ClusterState.builder(remoteState.getClusterName())
|
||||
.routingTable(remoteState.routingTable())
|
||||
.metaData(MetaData.builder(remoteState.metaData())
|
||||
.put(IndexMetaData.builder(remoteState.metaData().index(indexName)).state(IndexMetaData.State.CLOSE).build(), true)
|
||||
.build())
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
final ClusterState finalRemoteState = remoteState;
|
||||
final AtomicReference<ClusterState> lastModifiedClusterState = new AtomicReference<>(localState);
|
||||
final List<AutoFollowCoordinator.AutoFollowResult> results = new ArrayList<>();
|
||||
final Set<Object> followedIndices = ConcurrentCollections.newConcurrentSet();
|
||||
final AutoFollower autoFollower =
|
||||
new AutoFollower("remote", results::addAll, localClusterStateSupplier(localState), () -> 1L, Runnable::run) {
|
||||
@Override
|
||||
void getRemoteClusterState(String remoteCluster,
|
||||
long metadataVersion,
|
||||
BiConsumer<ClusterStateResponse, Exception> handler) {
|
||||
assertThat(remoteCluster, equalTo("remote"));
|
||||
handler.accept(new ClusterStateResponse(new ClusterName("remote"), finalRemoteState, false), null);
|
||||
}
|
||||
|
||||
@Override
|
||||
void createAndFollow(Map<String, String> headers,
|
||||
PutFollowAction.Request followRequest,
|
||||
Runnable successHandler,
|
||||
Consumer<Exception> failureHandler) {
|
||||
followedIndices.add(followRequest.getLeaderIndex());
|
||||
successHandler.run();
|
||||
}
|
||||
|
||||
@Override
|
||||
void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
|
||||
lastModifiedClusterState.updateAndGet(updateFunction::apply);
|
||||
handler.accept(null);
|
||||
}
|
||||
|
||||
@Override
|
||||
void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List<String> patterns) {
|
||||
// Ignore, to avoid invoking updateAutoFollowMetadata(...) twice
|
||||
}
|
||||
};
|
||||
autoFollower.start();
|
||||
|
||||
assertThat(results, notNullValue());
|
||||
assertThat(results.size(), equalTo(1));
|
||||
|
||||
for (ObjectObjectCursor<String, IndexMetaData> index : remoteState.metaData().indices()) {
|
||||
boolean expect = index.value.getState() == IndexMetaData.State.OPEN;
|
||||
assertThat(results.get(0).autoFollowExecutionResults.containsKey(index.value.getIndex()), is(expect));
|
||||
assertThat(followedIndices.contains(index.key), is(expect));
|
||||
}
|
||||
}
|
||||
|
||||
private static ClusterState createRemoteClusterState(String indexName, Boolean enableSoftDeletes) {
|
||||
Settings.Builder indexSettings;
|
||||
if (enableSoftDeletes != null) {
|
||||
|
@ -1075,11 +1185,13 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
|
|||
|
||||
private static ClusterState createRemoteClusterState(ClusterState previous, String indexName) {
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder(indexName)
|
||||
.settings(settings(Version.CURRENT).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true))
|
||||
.settings(settings(Version.CURRENT)
|
||||
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(0)
|
||||
.build();
|
||||
ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("remote"))
|
||||
ClusterState.Builder csBuilder = ClusterState.builder(previous.getClusterName())
|
||||
.metaData(MetaData.builder(previous.metaData())
|
||||
.version(previous.metaData().version() + 1)
|
||||
.put(indexMetaData, true));
|
||||
|
@ -1087,7 +1199,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
|
|||
ShardRouting shardRouting =
|
||||
TestShardRouting.newShardRouting(indexName, 0, "1", true, ShardRoutingState.INITIALIZING).moveToStarted();
|
||||
IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex()).addShard(shardRouting).build();
|
||||
csBuilder.routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build();
|
||||
csBuilder.routingTable(RoutingTable.builder(previous.routingTable()).add(indexRoutingTable).build()).build();
|
||||
|
||||
return csBuilder.build();
|
||||
}
|
||||
|
|
|
@ -50,7 +50,7 @@ public class ClusterPrivilegeResolver {
|
|||
private static final Set<String> MANAGE_TOKEN_PATTERN = Collections.singleton("cluster:admin/xpack/security/token/*");
|
||||
private static final Set<String> MANAGE_API_KEY_PATTERN = Collections.singleton("cluster:admin/xpack/security/api_key/*");
|
||||
private static final Set<String> MONITOR_PATTERN = Collections.singleton("cluster:monitor/*");
|
||||
private static final Set<String> MONITOR_DATA_FRAME_PATTERN = Collections.unmodifiableSet(
|
||||
private static final Set<String> MONITOR_TRANSFORM_PATTERN = Collections.unmodifiableSet(
|
||||
Sets.newHashSet("cluster:monitor/data_frame/*", "cluster:monitor/transform/*"));
|
||||
private static final Set<String> MONITOR_ML_PATTERN = Collections.singleton("cluster:monitor/xpack/ml/*");
|
||||
private static final Set<String> MONITOR_WATCHER_PATTERN = Collections.singleton("cluster:monitor/xpack/watcher/*");
|
||||
|
@ -59,7 +59,7 @@ public class ClusterPrivilegeResolver {
|
|||
Sets.newHashSet("cluster:*", "indices:admin/template/*"));
|
||||
private static final Set<String> MANAGE_ML_PATTERN = Collections.unmodifiableSet(
|
||||
Sets.newHashSet("cluster:admin/xpack/ml/*", "cluster:monitor/xpack/ml/*"));
|
||||
private static final Set<String> MANAGE_DATA_FRAME_PATTERN = Collections.unmodifiableSet(
|
||||
private static final Set<String> MANAGE_TRANSFORM_PATTERN = Collections.unmodifiableSet(
|
||||
Sets.newHashSet("cluster:admin/data_frame/*", "cluster:monitor/data_frame/*",
|
||||
"cluster:monitor/transform/*", "cluster:admin/transform/*"));
|
||||
private static final Set<String> MANAGE_WATCHER_PATTERN = Collections.unmodifiableSet(
|
||||
|
@ -90,14 +90,18 @@ public class ClusterPrivilegeResolver {
|
|||
public static final NamedClusterPrivilege ALL = new ActionClusterPrivilege("all", ALL_CLUSTER_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR = new ActionClusterPrivilege("monitor", MONITOR_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR_ML = new ActionClusterPrivilege("monitor_ml", MONITOR_ML_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR_DATA_FRAME =
|
||||
new ActionClusterPrivilege("monitor_data_frame_transforms", MONITOR_DATA_FRAME_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR_TRANSFORM_DEPRECATED =
|
||||
new ActionClusterPrivilege("monitor_data_frame_transforms", MONITOR_TRANSFORM_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR_TRANSFORM =
|
||||
new ActionClusterPrivilege("monitor_transform", MONITOR_TRANSFORM_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR_WATCHER = new ActionClusterPrivilege("monitor_watcher", MONITOR_WATCHER_PATTERN);
|
||||
public static final NamedClusterPrivilege MONITOR_ROLLUP = new ActionClusterPrivilege("monitor_rollup", MONITOR_ROLLUP_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE = new ActionClusterPrivilege("manage", ALL_CLUSTER_PATTERN, ALL_SECURITY_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_ML = new ActionClusterPrivilege("manage_ml", MANAGE_ML_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_DATA_FRAME =
|
||||
new ActionClusterPrivilege("manage_data_frame_transforms", MANAGE_DATA_FRAME_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_TRANSFORM_DEPRECATED =
|
||||
new ActionClusterPrivilege("manage_data_frame_transforms", MANAGE_TRANSFORM_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_TRANSFORM =
|
||||
new ActionClusterPrivilege("manage_transform", MANAGE_TRANSFORM_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_TOKEN = new ActionClusterPrivilege("manage_token", MANAGE_TOKEN_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_WATCHER = new ActionClusterPrivilege("manage_watcher", MANAGE_WATCHER_PATTERN);
|
||||
public static final NamedClusterPrivilege MANAGE_ROLLUP = new ActionClusterPrivilege("manage_rollup", MANAGE_ROLLUP_PATTERN);
|
||||
|
@ -133,12 +137,14 @@ public class ClusterPrivilegeResolver {
|
|||
ALL,
|
||||
MONITOR,
|
||||
MONITOR_ML,
|
||||
MONITOR_DATA_FRAME,
|
||||
MONITOR_TRANSFORM_DEPRECATED,
|
||||
MONITOR_TRANSFORM,
|
||||
MONITOR_WATCHER,
|
||||
MONITOR_ROLLUP,
|
||||
MANAGE,
|
||||
MANAGE_ML,
|
||||
MANAGE_DATA_FRAME,
|
||||
MANAGE_TRANSFORM_DEPRECATED,
|
||||
MANAGE_TRANSFORM,
|
||||
MANAGE_TOKEN,
|
||||
MANAGE_WATCHER,
|
||||
MANAGE_IDX_TEMPLATES,
|
||||
|
|
|
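The constants above only rename privileges; each privilege remains a named set of action-name patterns. As a rough, illustrative sketch (not the actual resolver implementation), matching an action against such wildcard patterns can be pictured like this; the action name in main is hypothetical:

// Illustrative only: expands a "cluster:monitor/transform/*"-style pattern into a regex
// and checks an action name against it.
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

class PrivilegePatternSketch {
    static boolean matches(String pattern, String actionName) {
        // Quote the literal parts and let each '*' match any remainder of the action name.
        String regex = Pattern.quote(pattern).replace("*", "\\E.*\\Q");
        return actionName.matches(regex);
    }

    public static void main(String[] args) {
        List<String> monitorTransform = Arrays.asList(
            "cluster:monitor/data_frame/*", "cluster:monitor/transform/*");
        boolean allowed = monitorTransform.stream()
            .anyMatch(p -> matches(p, "cluster:monitor/transform/stats/get"));
        System.out.println(allowed); // true
    }
}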
@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableCluster
|
|||
import org.elasticsearch.xpack.core.security.support.MetadataUtils;
|
||||
import org.elasticsearch.xpack.core.security.user.KibanaUser;
|
||||
import org.elasticsearch.xpack.core.security.user.UsernamesField;
|
||||
import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants;
|
||||
import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField;
|
||||
import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
|
||||
import org.elasticsearch.xpack.core.watcher.watch.Watch;
|
||||
|
@ -179,28 +180,52 @@ public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListene
|
|||
.application("kibana-*").resources("*").privileges("reserved_ml").build()
|
||||
},
|
||||
null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null))
|
||||
// DEPRECATED: to be removed in 9.0.0
|
||||
.put("data_frame_transforms_admin", new RoleDescriptor("data_frame_transforms_admin",
|
||||
new String[] { "manage_data_frame_transforms" },
|
||||
new RoleDescriptor.IndicesPrivileges[]{
|
||||
RoleDescriptor.IndicesPrivileges.builder()
|
||||
.indices(".data-frame-notifications*")
|
||||
.indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS)
|
||||
.privileges("view_index_metadata", "read").build()
|
||||
},
|
||||
new RoleDescriptor.ApplicationResourcePrivileges[] {
|
||||
RoleDescriptor.ApplicationResourcePrivileges.builder()
|
||||
.application("kibana-*").resources("*").privileges("reserved_ml").build()
|
||||
}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null))
|
||||
// DEPRECATED: to be removed in 9.0.0
|
||||
.put("data_frame_transforms_user", new RoleDescriptor("data_frame_transforms_user",
|
||||
new String[] { "monitor_data_frame_transforms" },
|
||||
new RoleDescriptor.IndicesPrivileges[]{
|
||||
RoleDescriptor.IndicesPrivileges.builder()
|
||||
.indices(".data-frame-notifications*")
|
||||
.indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS)
|
||||
.privileges("view_index_metadata", "read").build()
|
||||
},
|
||||
new RoleDescriptor.ApplicationResourcePrivileges[] {
|
||||
RoleDescriptor.ApplicationResourcePrivileges.builder()
|
||||
.application("kibana-*").resources("*").privileges("reserved_ml").build()
|
||||
}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null))
|
||||
.put("transform_admin", new RoleDescriptor("transform_admin",
|
||||
new String[] { "manage_transform" },
|
||||
new RoleDescriptor.IndicesPrivileges[]{
|
||||
RoleDescriptor.IndicesPrivileges.builder()
|
||||
.indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS)
|
||||
.privileges("view_index_metadata", "read").build()
|
||||
}, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null))
|
||||
.put("transform_user", new RoleDescriptor("transform_user",
|
||||
new String[] { "monitor_transform" },
|
||||
new RoleDescriptor.IndicesPrivileges[]{
|
||||
RoleDescriptor.IndicesPrivileges.builder()
|
||||
.indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED,
|
||||
TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS)
|
||||
.privileges("view_index_metadata", "read").build()
|
||||
}, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null))
|
||||
.put("watcher_admin", new RoleDescriptor("watcher_admin", new String[] { "manage_watcher" },
|
||||
new RoleDescriptor.IndicesPrivileges[] {
|
||||
RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME,
|
||||
|
|
|
@ -30,7 +30,11 @@ public final class TransformInternalIndexConstants {
|
|||
|
||||
// audit index
|
||||
public static final String AUDIT_TEMPLATE_VERSION = "1";
|
||||
public static final String AUDIT_INDEX_PREFIX = ".data-frame-notifications-";
|
||||
public static final String AUDIT_INDEX_PREFIX = ".transform-notifications-";
|
||||
public static final String AUDIT_INDEX_PATTERN = AUDIT_INDEX_PREFIX + "*";
|
||||
public static final String AUDIT_INDEX_PATTERN_DEPRECATED = ".data-frame-notifications-*";
|
||||
|
||||
public static final String AUDIT_INDEX_READ_ALIAS = ".transform-notifications-read";
|
||||
public static final String AUDIT_INDEX = AUDIT_INDEX_PREFIX + AUDIT_TEMPLATE_VERSION;
|
||||
|
||||
private TransformInternalIndexConstants() {
|
||||
|
|
|
@ -186,6 +186,8 @@ public class ReservedRolesStoreTests extends ESTestCase {
|
|||
assertThat(ReservedRolesStore.isReserved("machine_learning_admin"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("data_frame_transforms_user"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("data_frame_transforms_admin"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("transform_user"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("transform_admin"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("watcher_user"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("watcher_admin"), is(true));
|
||||
assertThat(ReservedRolesStore.isReserved("kibana_dashboard_only_user"), is(true));
|
||||
|
@ -1121,11 +1123,16 @@ public class ReservedRolesStoreTests extends ESTestCase {
|
|||
new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false));
|
||||
}
|
||||
|
||||
public void testDataFrameTransformsAdminRole() {
|
||||
public void testTransformAdminRole() {
|
||||
final TransportRequest request = mock(TransportRequest.class);
|
||||
final Authentication authentication = mock(Authentication.class);
|
||||
|
||||
RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("data_frame_transforms_admin");
|
||||
RoleDescriptor[] roleDescriptors = {
|
||||
new ReservedRolesStore().roleDescriptor("data_frame_transforms_admin"),
|
||||
new ReservedRolesStore().roleDescriptor("transform_admin")
|
||||
};
|
||||
|
||||
for (RoleDescriptor roleDescriptor : roleDescriptors) {
|
||||
assertNotNull(roleDescriptor);
|
||||
assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true));
|
||||
|
||||
|
@ -1141,7 +1148,9 @@ public class ReservedRolesStoreTests extends ESTestCase {
|
|||
|
||||
assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false));
|
||||
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX);
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS);
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN);
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED);
|
||||
assertNoAccessAllowed(role, "foo");
|
||||
assertNoAccessAllowed(role, TransformInternalIndexConstants.LATEST_INDEX_NAME); // internal use only
|
||||
|
||||
|
@ -1150,21 +1159,32 @@ public class ReservedRolesStoreTests extends ESTestCase {
|
|||
final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana");
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false));
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true));
|
||||
|
||||
if (roleDescriptor.getName().equals("data_frame_transforms_admin")) {
|
||||
assertThat(role.application()
|
||||
.grants(new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true));
|
||||
}
|
||||
|
||||
final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24);
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false));
|
||||
if (roleDescriptor.getName().equals("data_frame_transforms_admin")) {
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testDataFrameTransformsUserRole() {
|
||||
final TransportRequest request = mock(TransportRequest.class);
|
||||
final Authentication authentication = mock(Authentication.class);
|
||||
|
||||
RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("data_frame_transforms_user");
|
||||
RoleDescriptor[] roleDescriptors = {
|
||||
new ReservedRolesStore().roleDescriptor("data_frame_transforms_user"),
|
||||
new ReservedRolesStore().roleDescriptor("transform_user")
|
||||
};
|
||||
|
||||
for (RoleDescriptor roleDescriptor : roleDescriptors) {
|
||||
assertNotNull(roleDescriptor);
|
||||
assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true));
|
||||
|
||||
|
@ -1180,7 +1200,9 @@ public class ReservedRolesStoreTests extends ESTestCase {
|
|||
|
||||
assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false));
|
||||
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX);
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS);
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN);
|
||||
assertOnlyReadAllowed(role, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED);
|
||||
assertNoAccessAllowed(role, "foo");
|
||||
assertNoAccessAllowed(role, TransformInternalIndexConstants.LATEST_INDEX_NAME);
|
||||
|
||||
|
@ -1189,15 +1211,21 @@ public class ReservedRolesStoreTests extends ESTestCase {
|
|||
final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana");
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false));
|
||||
|
||||
if (roleDescriptor.getName().equals("data_frame_transforms_user")) {
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true));
|
||||
}
|
||||
|
||||
final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24);
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false));
|
||||
if (roleDescriptor.getName().equals("data_frame_transforms_user")) {
|
||||
assertThat(role.application().grants(
|
||||
new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testWatcherAdminRole() {
|
||||
final TransportRequest request = mock(TransportRequest.class);
|
||||
|
|
|
@ -162,6 +162,9 @@ teardown:
|
|||
|
||||
---
|
||||
"Test All Indexes Lifecycle Explain":
|
||||
- skip:
|
||||
reason: https://github.com/elastic/elasticsearch/issues/47275
|
||||
version: "6.7.0 - "
|
||||
|
||||
- do:
|
||||
ilm.explain_lifecycle:
|
||||
|
|
|
@ -117,7 +117,7 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
|
|||
return settings.build();
|
||||
}
|
||||
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47689")
|
||||
public void testSnapshotInProgress() throws Exception {
|
||||
final String indexName = "test";
|
||||
final String policyName = "test-policy";
|
||||
|
@ -167,6 +167,7 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47834")
|
||||
public void testRetentionWhileSnapshotInProgress() throws Exception {
|
||||
final String indexName = "test";
|
||||
final String policyId = "slm-policy";
|
||||
|
|
|
@ -49,11 +49,19 @@ public class ClusterAlertsUtil {
|
|||
private static final Pattern UNIQUE_WATCH_ID_PROPERTY =
|
||||
Pattern.compile(Pattern.quote("${monitoring.watch.unique_id}"));
|
||||
|
||||
/**
|
||||
* Replace the <code>${monitoring.version_created}</code> field in the watches.
|
||||
*
|
||||
* @see #LAST_UPDATED_VERSION
|
||||
*/
|
||||
private static final Pattern VERSION_CREATED_PROPERTY =
|
||||
Pattern.compile(Pattern.quote("${monitoring.version_created}"));
|
||||
|
||||
/**
|
||||
* The last time that all watches were updated. For now, all watches have been updated in the same version and should all be replaced
|
||||
* together.
|
||||
*/
|
||||
public static final int LAST_UPDATED_VERSION = Version.V_7_0_0.id;
|
||||
public static final int LAST_UPDATED_VERSION = Version.V_7_5_0.id;
|
||||
|
||||
/**
|
||||
* An unsorted list of Watch IDs representing resource files for Monitoring Cluster Alerts.
|
||||
|
@ -113,6 +121,7 @@ public class ClusterAlertsUtil {
|
|||
source = CLUSTER_UUID_PROPERTY.matcher(source).replaceAll(clusterUuid);
|
||||
source = WATCH_ID_PROPERTY.matcher(source).replaceAll(watchId);
|
||||
source = UNIQUE_WATCH_ID_PROPERTY.matcher(source).replaceAll(uniqueWatchId);
|
||||
source = VERSION_CREATED_PROPERTY.matcher(source).replaceAll(Integer.toString(LAST_UPDATED_VERSION));
|
||||
|
||||
return source;
|
||||
} catch (final IOException e) {
|
||||
|
|
|
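The version_created handling added above follows the same quote-and-replace approach as the existing placeholders. A standalone, JDK-only sketch of that substitution step (the version id value uses the 7.5.0-style id format already shown in these files):

// Sketch of the placeholder substitution used above: quote the literal "${...}" token
// so it is not treated as regex syntax, then replaceAll with the concrete value.
import java.util.regex.Pattern;

class WatchPlaceholderSketch {
    static final int LAST_UPDATED_VERSION = 7050099; // id format for 7.5.0, matching Version.V_7_5_0.id above
    static final Pattern VERSION_CREATED =
        Pattern.compile(Pattern.quote("${monitoring.version_created}"));

    static String render(String watchSource) {
        return VERSION_CREATED.matcher(watchSource)
            .replaceAll(Integer.toString(LAST_UPDATED_VERSION));
    }

    public static void main(String[] args) {
        System.out.println(render("{\"version_created\": \"${monitoring.version_created}\"}"));
        // prints: {"version_created": "7050099"}
    }
}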
@ -7,7 +7,7 @@
|
|||
"link": "elasticsearch/indices",
|
||||
"severity": 2100,
|
||||
"type": "monitoring",
|
||||
"version_created": 7000099,
|
||||
"version_created": "${monitoring.version_created}",
|
||||
"watch": "${monitoring.watch.id}"
|
||||
}
|
||||
},
|
||||
|
@ -134,11 +134,23 @@
|
|||
},
|
||||
"transform": {
|
||||
"script": {
|
||||
"source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
|
||||
"source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;ctx.vars.found_state = ctx.payload.check.hits.total != 0;def state = ctx.vars.found_state ? ctx.payload.check.hits.hits[0]._source.cluster_state.status : 'unknown';if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
|
||||
}
|
||||
},
|
||||
"actions": {
|
||||
"log_state_not_found": {
|
||||
"condition": {
|
||||
"script": "!ctx.vars.found_state"
|
||||
},
|
||||
"logging" : {
|
||||
"text" : "Watch [{{ctx.metadata.xpack.watch}}] could not determine cluster state for cluster [{{ctx.metadata.xpack.cluster_uuid}}]. This likely means the cluster has not sent any monitoring data recently.",
|
||||
"level" : "debug"
|
||||
}
|
||||
},
|
||||
"add_to_alerts_index": {
|
||||
"condition": {
|
||||
"script": "ctx.vars.found_state"
|
||||
},
|
||||
"index": {
|
||||
"index": ".monitoring-alerts-7",
|
||||
"doc_id": "${monitoring.watch.unique_id}"
|
||||
|
@ -146,7 +158,7 @@
|
|||
},
|
||||
"send_email_to_admin": {
|
||||
"condition": {
|
||||
"script": "return ctx.vars.email_recipient != null && (ctx.vars.is_new || ctx.vars.is_resolved)"
|
||||
"script": "return ctx.vars.email_recipient != null && ctx.vars.found_state && (ctx.vars.is_new || ctx.vars.is_resolved)"
|
||||
},
|
||||
"email": {
|
||||
"to": "X-Pack Admin <{{ctx.vars.email_recipient}}>",
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
"link": "elasticsearch/nodes",
|
||||
"severity": 1999,
|
||||
"type": "monitoring",
|
||||
"version_created": 7000099,
|
||||
"version_created": "${monitoring.version_created}",
|
||||
"watch": "${monitoring.watch.id}"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
"link": "elasticsearch/nodes",
|
||||
"severity": 1000,
|
||||
"type": "monitoring",
|
||||
"version_created": 7000099,
|
||||
"version_created": "${monitoring.version_created}",
|
||||
"watch": "${monitoring.watch.id}"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
"link": "kibana/instances",
|
||||
"severity": 1000,
|
||||
"type": "monitoring",
|
||||
"version_created": 7000099,
|
||||
"version_created": "${monitoring.version_created}",
|
||||
"watch": "${monitoring.watch.id}"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
"link": "logstash/instances",
|
||||
"severity": 1000,
|
||||
"type": "monitoring",
|
||||
"version_created": 7000099,
|
||||
"version_created": "${monitoring.version_created}",
|
||||
"watch": "${monitoring.watch.id}"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
"alert_index": ".monitoring-alerts-7",
|
||||
"cluster_uuid": "${monitoring.watch.cluster_uuid}",
|
||||
"type": "monitoring",
|
||||
"version_created": 7000099,
|
||||
"version_created": "${monitoring.version_created}",
|
||||
"watch": "${monitoring.watch.id}"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -68,6 +68,7 @@ public class ClusterAlertsUtilTests extends ESTestCase {
|
|||
assertThat(watch, notNullValue());
|
||||
assertThat(watch, containsString(clusterUuid));
|
||||
assertThat(watch, containsString(watchId));
|
||||
assertThat(watch, containsString(String.valueOf(ClusterAlertsUtil.LAST_UPDATED_VERSION)));
|
||||
|
||||
if ("elasticsearch_nodes".equals(watchId) == false) {
|
||||
assertThat(watch, containsString(clusterUuid + "_" + watchId));
|
||||
|
|
|
@ -452,7 +452,6 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
|
|||
});
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34762")
|
||||
public void testRandomizedDateHisto() throws Exception {
|
||||
String rollupIndex = randomAlphaOfLengthBetween(5, 10);
|
||||
|
||||
|
@ -468,7 +467,9 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase {
|
|||
final List<Map<String, Object>> dataset = new ArrayList<>();
|
||||
int numDocs = randomIntBetween(1,100);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
long timestamp = new DateTime().minusHours(randomIntBetween(1,100)).getMillis();
|
||||
// Make sure the timestamp is sufficiently in the past that we don't get bitten
|
||||
// by internal rounding, causing no docs to match
|
||||
long timestamp = new DateTime().minusDays(2).minusHours(randomIntBetween(11,100)).getMillis();
|
||||
dataset.add(asMap(timestampField, timestamp, valueField, randomLongBetween(1, 100)));
|
||||
}
|
||||
executeTestCase(dataset, job, System.currentTimeMillis(), (resp) -> {
|
||||
|
|
|
@ -83,13 +83,8 @@ class LdapUserSearchSessionFactory extends PoolingSessionFactory {
|
|||
final String dn = entry.getDN();
|
||||
final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars());
|
||||
final SimpleBindRequest bind = new SimpleBindRequest(dn, passwordBytes);
|
||||
LdapUtils.maybeForkThenBindAndRevert(connectionPool, bind, threadPool, new ActionRunnable<LdapSession>(listener) {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
listener.onResponse(new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout,
|
||||
entry.getAttributes()));
|
||||
}
|
||||
});
|
||||
LdapUtils.maybeForkThenBindAndRevert(connectionPool, bind, threadPool, ActionRunnable.supply(listener, () ->
|
||||
new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout, entry.getAttributes())));
|
||||
}
|
||||
}, listener::onFailure));
|
||||
}
|
||||
|
|
|
@ -274,23 +274,22 @@ schema::h:ts|c:l
|
|||
SELECT HISTOGRAM(birth_date, INTERVAL 1 YEAR) AS h, COUNT(*) as c FROM test_emp GROUP BY h;
|
||||
|
||||
h | c
|
||||
--------------------+---------------
|
||||
------------------------+---------------
|
||||
null |10
|
||||
1951-04-11T00:00:00Z|1
|
||||
1952-04-05T00:00:00Z|10
|
||||
1953-03-31T00:00:00Z|10
|
||||
1954-03-26T00:00:00Z|7
|
||||
1955-03-21T00:00:00Z|4
|
||||
1956-03-15T00:00:00Z|4
|
||||
1957-03-10T00:00:00Z|6
|
||||
1958-03-05T00:00:00Z|6
|
||||
1959-02-28T00:00:00Z|9
|
||||
1960-02-23T00:00:00Z|7
|
||||
1961-02-17T00:00:00Z|8
|
||||
1962-02-12T00:00:00Z|6
|
||||
1963-02-07T00:00:00Z|7
|
||||
1964-02-02T00:00:00Z|5
|
||||
|
||||
1952-01-01T00:00:00.000Z|8
|
||||
1953-01-01T00:00:00.000Z|11
|
||||
1954-01-01T00:00:00.000Z|8
|
||||
1955-01-01T00:00:00.000Z|4
|
||||
1956-01-01T00:00:00.000Z|5
|
||||
1957-01-01T00:00:00.000Z|4
|
||||
1958-01-01T00:00:00.000Z|7
|
||||
1959-01-01T00:00:00.000Z|9
|
||||
1960-01-01T00:00:00.000Z|8
|
||||
1961-01-01T00:00:00.000Z|8
|
||||
1962-01-01T00:00:00.000Z|6
|
||||
1963-01-01T00:00:00.000Z|7
|
||||
1964-01-01T00:00:00.000Z|4
|
||||
1965-01-01T00:00:00.000Z|1
|
||||
;
|
||||
|
||||
histogramDateTimeWithCountAndOrder
|
||||
|
@ -298,21 +297,21 @@ schema::h:ts|c:l
|
|||
SELECT HISTOGRAM(birth_date, INTERVAL 1 YEAR) AS h, COUNT(*) as c FROM test_emp GROUP BY h ORDER BY h DESC;
|
||||
|
||||
h | c
|
||||
--------------------+---------------
|
||||
1964-02-02T00:00:00Z|5
|
||||
1963-02-07T00:00:00Z|7
|
||||
1962-02-12T00:00:00Z|6
|
||||
1961-02-17T00:00:00Z|8
|
||||
1960-02-23T00:00:00Z|7
|
||||
1959-02-28T00:00:00Z|9
|
||||
1958-03-05T00:00:00Z|6
|
||||
1957-03-10T00:00:00Z|6
|
||||
1956-03-15T00:00:00Z|4
|
||||
1955-03-21T00:00:00Z|4
|
||||
1954-03-26T00:00:00Z|7
|
||||
1953-03-31T00:00:00Z|10
|
||||
1952-04-05T00:00:00Z|10
|
||||
1951-04-11T00:00:00Z|1
|
||||
------------------------+---------------
|
||||
1965-01-01T00:00:00.000Z|1
|
||||
1964-01-01T00:00:00.000Z|4
|
||||
1963-01-01T00:00:00.000Z|7
|
||||
1962-01-01T00:00:00.000Z|6
|
||||
1961-01-01T00:00:00.000Z|8
|
||||
1960-01-01T00:00:00.000Z|8
|
||||
1959-01-01T00:00:00.000Z|9
|
||||
1958-01-01T00:00:00.000Z|7
|
||||
1957-01-01T00:00:00.000Z|4
|
||||
1956-01-01T00:00:00.000Z|5
|
||||
1955-01-01T00:00:00.000Z|4
|
||||
1954-01-01T00:00:00.000Z|8
|
||||
1953-01-01T00:00:00.000Z|11
|
||||
1952-01-01T00:00:00.000Z|8
|
||||
null |10
|
||||
;
|
||||
|
||||
|
@ -370,21 +369,21 @@ schema::h:ts|c:l
|
|||
SELECT HISTOGRAM(birth_date, INTERVAL 1 YEAR) AS h, COUNT(*) as c FROM test_emp GROUP BY HISTOGRAM(birth_date, INTERVAL 1 YEAR) ORDER BY h DESC;
|
||||
|
||||
h | c
|
||||
--------------------+---------------
|
||||
1964-02-02T00:00:00Z|5
|
||||
1963-02-07T00:00:00Z|7
|
||||
1962-02-12T00:00:00Z|6
|
||||
1961-02-17T00:00:00Z|8
|
||||
1960-02-23T00:00:00Z|7
|
||||
1959-02-28T00:00:00Z|9
|
||||
1958-03-05T00:00:00Z|6
|
||||
1957-03-10T00:00:00Z|6
|
||||
1956-03-15T00:00:00Z|4
|
||||
1955-03-21T00:00:00Z|4
|
||||
1954-03-26T00:00:00Z|7
|
||||
1953-03-31T00:00:00Z|10
|
||||
1952-04-05T00:00:00Z|10
|
||||
1951-04-11T00:00:00Z|1
|
||||
------------------------+---------------
|
||||
1965-01-01T00:00:00.000Z|1
|
||||
1964-01-01T00:00:00.000Z|4
|
||||
1963-01-01T00:00:00.000Z|7
|
||||
1962-01-01T00:00:00.000Z|6
|
||||
1961-01-01T00:00:00.000Z|8
|
||||
1960-01-01T00:00:00.000Z|8
|
||||
1959-01-01T00:00:00.000Z|9
|
||||
1958-01-01T00:00:00.000Z|7
|
||||
1957-01-01T00:00:00.000Z|4
|
||||
1956-01-01T00:00:00.000Z|5
|
||||
1955-01-01T00:00:00.000Z|4
|
||||
1954-01-01T00:00:00.000Z|8
|
||||
1953-01-01T00:00:00.000Z|11
|
||||
1952-01-01T00:00:00.000Z|8
|
||||
null |10
|
||||
;
|
||||
|
||||
|
|
|
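The histogram expectations above (and in the docs table further below) change because a 1-year interval is now bucketed on calendar years rather than a fixed-length span, so every bucket key becomes January 1st. A rough JDK-only illustration of fixed-length versus calendar-aligned bucketing; this is not the SQL engine's implementation, and the 365-day grid is only for illustration:

// Sketch: fixed-interval vs calendar-aligned yearly bucketing of a timestamp.
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

class YearBucketSketch {
    // Fixed interval: floor the epoch-millis onto a 365-day grid anchored at the epoch.
    static Instant fixedYearBucket(Instant ts) {
        long interval = Duration.ofDays(365).toMillis();
        return Instant.ofEpochMilli(Math.floorDiv(ts.toEpochMilli(), interval) * interval);
    }

    // Calendar interval: truncate to January 1st of the timestamp's own (UTC) year.
    static Instant calendarYearBucket(Instant ts) {
        return ts.atZone(ZoneOffset.UTC)
            .truncatedTo(ChronoUnit.DAYS)
            .withDayOfYear(1)
            .toInstant();
    }

    public static void main(String[] args) {
        Instant birthDate = Instant.parse("1953-03-31T00:00:00Z");
        System.out.println(fixedYearBucket(birthDate));    // 1953-01-05T00:00:00Z, a 365-day grid point
        System.out.println(calendarYearBucket(birthDate)); // 1953-01-01T00:00:00Z
    }
}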
@ -348,3 +348,104 @@ SELECT CONVERT(IIF(languages > 1, IIF(languages = 3, '3')), SQL_BIGINT) AS cond
|
|||
3
|
||||
null
|
||||
;
|
||||
|
||||
ifNullWithCompatibleDateBasedValues
|
||||
schema::replacement:ts
|
||||
SELECT IFNULL(birth_date, {d '2110-04-12'}) AS replacement FROM test_emp GROUP BY 1 ORDER BY replacement DESC LIMIT 5;
|
||||
|
||||
replacement
|
||||
------------------------
|
||||
2110-04-12T00:00:00.000Z
|
||||
1965-01-03T00:00:00.000Z
|
||||
1964-10-18T00:00:00.000Z
|
||||
1964-06-11T00:00:00.000Z
|
||||
1964-06-02T00:00:00.000Z
|
||||
;
|
||||
|
||||
caseWithCompatibleIntervals_1
|
||||
schema::date_math:ts|c:l
|
||||
SELECT birth_date + (CASE WHEN gender='M' THEN INTERVAL 1 YEAR ELSE INTERVAL 6 MONTH END) AS date_math, COUNT(*) c FROM test_emp GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
|
||||
|
||||
date_math | c
|
||||
------------------------+---------------
|
||||
1966-01-03T00:00:00.000Z|1
|
||||
1965-06-11T00:00:00.000Z|1
|
||||
1965-04-18T00:00:00.000Z|2
|
||||
1964-12-02T00:00:00.000Z|1
|
||||
1964-11-26T00:00:00.000Z|1
|
||||
;
|
||||
|
||||
caseWithCompatibleIntervals_2
|
||||
SELECT hire_date, birth_date, (CASE WHEN birth_date > {d '1960-01-01'} THEN INTERVAL 1 YEAR ELSE INTERVAL 1 MONTH END) AS x FROM test_emp WHERE x + hire_date > {d '1995-01-01'} ORDER BY hire_date;
|
||||
|
||||
hire_date | birth_date | x
|
||||
------------------------+------------------------+---------------
|
||||
1994-04-09T00:00:00.000Z|1962-11-07T00:00:00.000Z|+1-0
|
||||
1995-01-27T00:00:00.000Z|1961-05-02T00:00:00.000Z|+1-0
|
||||
1995-03-13T00:00:00.000Z|1957-04-04T00:00:00.000Z|+0-1
|
||||
1995-03-20T00:00:00.000Z|1953-04-03T00:00:00.000Z|+0-1
|
||||
1995-08-22T00:00:00.000Z|1952-07-08T00:00:00.000Z|+0-1
|
||||
1995-12-15T00:00:00.000Z|1960-05-25T00:00:00.000Z|+1-0
|
||||
1996-11-05T00:00:00.000Z|1964-06-11T00:00:00.000Z|+1-0
|
||||
1997-05-19T00:00:00.000Z|1958-09-05T00:00:00.000Z|+0-1
|
||||
1999-04-30T00:00:00.000Z|1953-01-23T00:00:00.000Z|+0-1
|
||||
;
|
||||
|
||||
iifWithCompatibleIntervals
|
||||
schema::hire_date + IIF(salary > 70000, INTERVAL 2 HOURS, INTERVAL 2 DAYS):ts|salary:i
|
||||
SELECT hire_date + IIF(salary > 70000, INTERVAL 2 HOURS, INTERVAL 2 DAYS), salary FROM test_emp ORDER BY salary DESC LIMIT 10;
|
||||
|
||||
hire_date + IIF(salary > 70000, INTERVAL 2 HOURS, INTERVAL 2 DAYS)| salary
|
||||
------------------------------------------------------------------+---------------
|
||||
1985-11-20T02:00:00.000Z |74999
|
||||
1989-09-02T02:00:00.000Z |74970
|
||||
1989-02-10T02:00:00.000Z |74572
|
||||
1989-07-07T02:00:00.000Z |73851
|
||||
1999-04-30T02:00:00.000Z |73717
|
||||
1988-10-18T02:00:00.000Z |73578
|
||||
1990-09-15T02:00:00.000Z |71165
|
||||
1987-03-18T02:00:00.000Z |70011
|
||||
1987-05-28T00:00:00.000Z |69904
|
||||
1990-02-18T00:00:00.000Z |68547
|
||||
;
|
||||
|
||||
isNullWithIntervalMath
|
||||
SELECT ISNULL(birth_date, INTERVAL '23:45' HOUR TO MINUTES + {d '2019-09-17'}) AS c, salary, birth_date, hire_date FROM test_emp ORDER BY salary DESC LIMIT 5;
|
||||
|
||||
c:ts | salary:i | birth_date:ts | hire_date:ts
|
||||
------------------------+-----------------+------------------------+------------------------
|
||||
1956-12-13T00:00:00.000Z|74999 |1956-12-13T00:00:00.000Z|1985-11-20T00:00:00.000Z
|
||||
2019-09-17T00:00:00.000Z|74970 |null |1989-09-02T00:00:00.000Z
|
||||
1957-05-23T00:00:00.000Z|74572 |1957-05-23T00:00:00.000Z|1989-02-10T00:00:00.000Z
|
||||
1962-07-10T00:00:00.000Z|73851 |1962-07-10T00:00:00.000Z|1989-07-07T00:00:00.000Z
|
||||
1953-01-23T00:00:00.000Z|73717 |1953-01-23T00:00:00.000Z|1999-04-30T00:00:00.000Z
|
||||
;
|
||||
|
||||
coalesceWithCompatibleDateBasedTypes
|
||||
SELECT COALESCE(birth_date, CAST(birth_date AS DATE), CAST(hire_date AS DATETIME)) AS coalesce FROM test_emp ORDER BY 1 LIMIT 5;
|
||||
|
||||
coalesce:ts
|
||||
------------------------
|
||||
1952-02-27T00:00:00.000Z
|
||||
1952-04-19T00:00:00.000Z
|
||||
1952-05-15T00:00:00.000Z
|
||||
1952-06-13T00:00:00.000Z
|
||||
1952-07-08T00:00:00.000Z
|
||||
;
|
||||
|
||||
greatestWithCompatibleDateBasedTypes
|
||||
SELECT GREATEST(null, null, birth_date + INTERVAL 25 YEARS, hire_date + INTERVAL 2 DAYS, CAST(hire_date + INTERVAL 2 DAYS AS DATE)) AS greatest, birth_date, hire_date FROM test_emp ORDER BY 1 LIMIT 10;
|
||||
|
||||
greatest:ts | birth_date:ts | hire_date:ts
|
||||
------------------------+------------------------+------------------------
|
||||
1985-02-20T00:00:00.000Z|1952-04-19T00:00:00.000Z|1985-02-18T00:00:00.000Z
|
||||
1985-02-26T00:00:00.000Z|null |1985-02-24T00:00:00.000Z
|
||||
1985-07-11T00:00:00.000Z|1952-06-13T00:00:00.000Z|1985-07-09T00:00:00.000Z
|
||||
1985-10-16T00:00:00.000Z|1955-08-20T00:00:00.000Z|1985-10-14T00:00:00.000Z
|
||||
1985-11-21T00:00:00.000Z|1957-12-03T00:00:00.000Z|1985-11-19T00:00:00.000Z
|
||||
1985-11-22T00:00:00.000Z|1956-12-13T00:00:00.000Z|1985-11-20T00:00:00.000Z
|
||||
1985-11-22T00:00:00.000Z|1959-04-07T00:00:00.000Z|1985-11-20T00:00:00.000Z
|
||||
1986-02-06T00:00:00.000Z|1954-09-13T00:00:00.000Z|1986-02-04T00:00:00.000Z
|
||||
1986-02-28T00:00:00.000Z|1952-11-13T00:00:00.000Z|1986-02-26T00:00:00.000Z
|
||||
1986-05-30T00:00:00.000Z|1961-05-30T00:00:00.000Z|1986-03-14T00:00:00.000Z
|
||||
;
|
||||
|
|
|
@ -812,22 +812,22 @@ SELECT HISTOGRAM(birth_date, INTERVAL 1 YEAR) AS h, COUNT(*) AS c FROM emp GROUP
|
|||
|
||||
|
||||
h | c
|
||||
--------------------+---------------
|
||||
------------------------+---------------
|
||||
null |10
|
||||
1951-04-11T00:00:00Z|1
|
||||
1952-04-05T00:00:00Z|10
|
||||
1953-03-31T00:00:00Z|10
|
||||
1954-03-26T00:00:00Z|7
|
||||
1955-03-21T00:00:00Z|4
|
||||
1956-03-15T00:00:00Z|4
|
||||
1957-03-10T00:00:00Z|6
|
||||
1958-03-05T00:00:00Z|6
|
||||
1959-02-28T00:00:00Z|9
|
||||
1960-02-23T00:00:00Z|7
|
||||
1961-02-17T00:00:00Z|8
|
||||
1962-02-12T00:00:00Z|6
|
||||
1963-02-07T00:00:00Z|7
|
||||
1964-02-02T00:00:00Z|5
|
||||
1952-01-01T00:00:00.000Z|8
|
||||
1953-01-01T00:00:00.000Z|11
|
||||
1954-01-01T00:00:00.000Z|8
|
||||
1955-01-01T00:00:00.000Z|4
|
||||
1956-01-01T00:00:00.000Z|5
|
||||
1957-01-01T00:00:00.000Z|4
|
||||
1958-01-01T00:00:00.000Z|7
|
||||
1959-01-01T00:00:00.000Z|9
|
||||
1960-01-01T00:00:00.000Z|8
|
||||
1961-01-01T00:00:00.000Z|8
|
||||
1962-01-01T00:00:00.000Z|6
|
||||
1963-01-01T00:00:00.000Z|7
|
||||
1964-01-01T00:00:00.000Z|4
|
||||
1965-01-01T00:00:00.000Z|1
|
||||
|
||||
// end::histogramDateTime
|
||||
;
|
||||
|
|
|
@ -119,3 +119,23 @@ SELECT COUNT(*), TRUNCATE(emp_no, -2) t FROM test_emp WHERE 'aaabbb' RLIKE 'a{2,
|
|||
99 |10000
|
||||
1 |10100
|
||||
;
|
||||
|
||||
inWithCompatibleDateTypes
|
||||
SELECT birth_date FROM test_emp WHERE birth_date IN ({d '1959-07-23'},CAST('1959-12-25T12:12:12' AS TIMESTAMP)) OR birth_date IS NULL ORDER BY birth_date;
|
||||
|
||||
birth_date:ts
|
||||
------------------------
|
||||
1959-07-23T00:00:00.000Z
|
||||
1959-07-23T00:00:00.000Z
|
||||
1959-12-25T00:00:00.000Z
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
;
|
||||
|
|
|
@ -101,13 +101,13 @@ SELECT MIN(salary) mi, MAX(salary) ma, YEAR(hire_date) year, ROUND(AVG(languages

mi:i | ma:i | year:i |ROUND(AVG(languages), 1):d|TRUNCATE(AVG(languages), 1):d| COUNT(*):l
---------------+---------------+---------------+--------------------------+-----------------------------+---------------
25324 |70011 |1986 |3.0 |3.0 |15
25945 |73578 |1987 |2.9 |2.8 |9
25976 |74970 |1988 |3.0 |3.0 |13
31120 |71165 |1989 |3.1 |3.0 |12
30404 |58715 |1992 |3.0 |3.0 |3
35742 |67492 |1993 |2.8 |2.7 |4
45656 |45656 |1995 |3.0 |3.0 |1
25324 |70011 |1987 |3.0 |3.0 |15
25945 |73578 |1988 |2.9 |2.8 |9
25976 |74970 |1989 |3.0 |3.0 |13
31120 |71165 |1990 |3.1 |3.0 |12
30404 |58715 |1993 |3.0 |3.0 |3
35742 |67492 |1994 |2.8 |2.7 |4
45656 |45656 |1996 |3.0 |3.0 |1
;

minMaxRoundWithHavingRound

@ -115,17 +115,17 @@ SELECT MIN(salary) mi, MAX(salary) ma, YEAR(hire_date) year, ROUND(AVG(languages

mi:i | ma:i | year:i |ROUND(AVG(languages),1):d| COUNT(*):l
---------------+---------------+---------------+-------------------------+---------------
26436 |74999 |1984 |3.1 |11
31897 |61805 |1985 |3.5 |11
25324 |70011 |1986 |3.0 |15
25945 |73578 |1987 |2.9 |9
25976 |74970 |1988 |3.0 |13
31120 |71165 |1989 |3.1 |12
32568 |65030 |1990 |3.3 |6
27215 |60781 |1991 |4.1 |8
30404 |58715 |1992 |3.0 |3
35742 |67492 |1993 |2.8 |4
45656 |45656 |1995 |3.0 |1
26436 |74999 |1985 |3.1 |11
31897 |61805 |1986 |3.5 |11
25324 |70011 |1987 |3.0 |15
25945 |73578 |1988 |2.9 |9
25976 |74970 |1989 |3.0 |13
31120 |71165 |1990 |3.1 |12
32568 |65030 |1991 |3.3 |6
27215 |60781 |1992 |4.1 |8
30404 |58715 |1993 |3.0 |3
35742 |67492 |1994 |2.8 |4
45656 |45656 |1996 |3.0 |1
;

groupByAndOrderByTruncateWithPositiveParameter

@ -12,7 +12,6 @@ import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypes;

import java.time.ZoneId;
import java.util.Collections;

@ -48,7 +47,7 @@ public class Histogram extends GroupingFunction {
        if (resolution == TypeResolution.TYPE_RESOLVED) {
            // interval must be Literal interval
            if (field().dataType().isDateBased()) {
                resolution = isType(interval, DataTypes::isInterval, "(Date) HISTOGRAM", ParamOrdinal.SECOND, "interval");
                resolution = isType(interval, DataType::isInterval, "(Date) HISTOGRAM", ParamOrdinal.SECOND, "interval");
            } else {
                resolution = isNumeric(interval, "(Numeric) HISTOGRAM", ParamOrdinal.SECOND);
            }

@ -20,19 +20,15 @@ import java.util.Objects;
import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isDate;
import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isString;
import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.BinaryDateTimeProcessor.BinaryDateOperation;
import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder;

public abstract class BinaryDateTimeFunction extends BinaryScalarFunction {

    private final ZoneId zoneId;
    private final BinaryDateOperation operation;

    public BinaryDateTimeFunction(Source source, Expression datePart, Expression timestamp, ZoneId zoneId,
                                  BinaryDateOperation operation) {
    public BinaryDateTimeFunction(Source source, Expression datePart, Expression timestamp, ZoneId zoneId) {
        super(source, datePart, timestamp);
        this.zoneId = zoneId;
        this.operation = operation;
    }

    @Override

@ -47,7 +43,7 @@ public abstract class BinaryDateTimeFunction extends BinaryScalarFunction {
        if (datePartValue != null && resolveDateTimeField(datePartValue) == false) {
            List<String> similar = findSimilarDateTimeFields(datePartValue);
            if (similar.isEmpty()) {
                return new TypeResolution(format(null, "first argument of [{}] must be one of {} or their aliases, found value [{}]",
                return new TypeResolution(format(null, "first argument of [{}] must be one of {} or their aliases; found value [{}]",
                    sourceText(),
                    validDateTimeFieldValues(),
                    Expressions.name(left())));

@ -78,9 +74,11 @@ public abstract class BinaryDateTimeFunction extends BinaryScalarFunction {

    @Override
    protected Pipe makePipe() {
        return new BinaryDateTimePipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right()), zoneId, operation);
        return createPipe(Expressions.pipe(left()), Expressions.pipe(right()), zoneId);
    }

    protected abstract Pipe createPipe(Pipe left, Pipe right, ZoneId zoneId);

    @Override
    public Nullability nullable() {
        return Nullability.TRUE;

@ -101,7 +99,7 @@ public abstract class BinaryDateTimeFunction extends BinaryScalarFunction {

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), zoneId, operation);
        return Objects.hash(super.hashCode(), zoneId);
    }

    @Override

@ -116,6 +114,6 @@ public abstract class BinaryDateTimeFunction extends BinaryScalarFunction {
            return false;
        }
        BinaryDateTimeFunction that = (BinaryDateTimeFunction) o;
        return zoneId.equals(that.zoneId) && operation == that.operation;
        return zoneId.equals(that.zoneId);
    }
}

@ -9,50 +9,34 @@ import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipe;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;

import java.time.ZoneId;
import java.util.Objects;

public class BinaryDateTimePipe extends BinaryPipe {
public abstract class BinaryDateTimePipe extends BinaryPipe {

    private final ZoneId zoneId;
    private final BinaryDateTimeProcessor.BinaryDateOperation operation;

    public BinaryDateTimePipe(Source source, Expression expression, Pipe left, Pipe right, ZoneId zoneId,
                              BinaryDateTimeProcessor.BinaryDateOperation operation) {
    public BinaryDateTimePipe(Source source, Expression expression, Pipe left, Pipe right, ZoneId zoneId) {
        super(source, expression, left, right);
        this.zoneId = zoneId;
        this.operation = operation;
    }

    ZoneId zoneId() {
        return zoneId;
    }

    BinaryDateTimeProcessor.BinaryDateOperation operation() {
        return operation;
    }

    @Override
    protected NodeInfo<BinaryDateTimePipe> info() {
        return NodeInfo.create(this, BinaryDateTimePipe::new, expression(), left(), right(), zoneId, operation);
    }

    @Override
    protected BinaryPipe replaceChildren(Pipe left, Pipe right) {
        return new BinaryDateTimePipe(source(), expression(), left, right, zoneId, operation);
    }

    @Override
    public Processor asProcessor() {
        return BinaryDateTimeProcessor.asProcessor(operation, left().asProcessor(), right().asProcessor(), zoneId);
        return makeProcessor(left().asProcessor(), right().asProcessor(), zoneId);
    }

    protected abstract Processor makeProcessor(Processor left, Processor right, ZoneId zoneId);

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), zoneId, operation);
        return Objects.hash(super.hashCode(), zoneId);
    }

    @Override

@ -67,7 +51,6 @@ public class BinaryDateTimePipe extends BinaryPipe {
            return false;
        }
        BinaryDateTimePipe that = (BinaryDateTimePipe) o;
        return Objects.equals(zoneId, that.zoneId) &&
            operation == that.operation;
        return Objects.equals(zoneId, that.zoneId);
    }
}

@ -15,16 +15,8 @@ import java.io.IOException;
import java.time.ZoneId;
import java.util.Objects;

import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.BinaryDateTimeProcessor.BinaryDateOperation.TRUNC;

public abstract class BinaryDateTimeProcessor extends BinaryProcessor {

    // TODO: Remove and in favour of inheritance (subclasses which implement abstract methods)
    public enum BinaryDateOperation {
        TRUNC,
        PART;
    }

    private final ZoneId zoneId;

    public BinaryDateTimeProcessor(Processor source1, Processor source2, ZoneId zoneId) {

@ -48,28 +40,24 @@ public abstract class BinaryDateTimeProcessor extends BinaryProcessor {
    @Override
    protected abstract Object doProcess(Object left, Object right);

    public static BinaryDateTimeProcessor asProcessor(BinaryDateOperation operation, Processor left, Processor right, ZoneId zoneId) {
        if (operation == TRUNC) {
            return new DateTruncProcessor(left, right, zoneId);
        } else {
            return new DatePartProcessor(left, right, zoneId);
        }
    }

    @Override
    public int hashCode() {
        return Objects.hash(zoneId);
        return Objects.hash(left(), right(), zoneId);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {

        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        BinaryDateTimeProcessor that = (BinaryDateTimeProcessor) o;
        return zoneId.equals(that.zoneId);

        BinaryDateTimeProcessor other = (BinaryDateTimeProcessor) obj;
        return Objects.equals(left(), other.left())
            && Objects.equals(right(), other.right())
            && Objects.equals(zoneId(), other.zoneId());
    }
}

@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Nullability;
import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.type.DataType;

@ -78,7 +79,7 @@ public class DatePart extends BinaryDateTimeFunction {
    }

    public DatePart(Source source, Expression truncateTo, Expression timestamp, ZoneId zoneId) {
        super(source, truncateTo, timestamp, zoneId, BinaryDateTimeProcessor.BinaryDateOperation.PART);
        super(source, truncateTo, timestamp, zoneId);
    }

    @Override

@ -101,16 +102,6 @@ public class DatePart extends BinaryDateTimeFunction {
        return Nullability.TRUE;
    }

    @Override
    protected boolean resolveDateTimeField(String dateTimeField) {
        return Part.resolve(dateTimeField) != null;
    }

    @Override
    protected List<String> findSimilarDateTimeFields(String dateTimeField) {
        return Part.findSimilar(dateTimeField);
    }

    @Override
    protected String scriptMethodName() {
        return "datePart";

@ -121,6 +112,21 @@ public class DatePart extends BinaryDateTimeFunction {
        return DatePartProcessor.process(left().fold(), right().fold(), zoneId());
    }

    @Override
    protected Pipe createPipe(Pipe left, Pipe right, ZoneId zoneId) {
        return new DatePartPipe(source(), this, left, right, zoneId);
    }

    @Override
    protected boolean resolveDateTimeField(String dateTimeField) {
        return Part.resolve(dateTimeField) != null;
    }

    @Override
    protected List<String> findSimilarDateTimeFields(String dateTimeField) {
        return Part.findSimilar(dateTimeField);
    }

    @Override
    protected List<String> validDateTimeFieldValues() {
        return Part.VALID_VALUES;

@ -0,0 +1,36 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;

import java.time.ZoneId;

public class DatePartPipe extends BinaryDateTimePipe {

    public DatePartPipe(Source source, Expression expression, Pipe left, Pipe right, ZoneId zoneId) {
        super(source, expression, left, right, zoneId);
    }

    @Override
    protected NodeInfo<DatePartPipe> info() {
        return NodeInfo.create(this, DatePartPipe::new, expression(), left(), right(), zoneId());
    }

    @Override
    protected DatePartPipe replaceChildren(Pipe left, Pipe right) {
        return new DatePartPipe(source(), expression(), left, right, zoneId());
    }

    @Override
    protected Processor makeProcessor(Processor left, Processor right, ZoneId zoneId) {
        return new DatePartProcessor(left, right, zoneId);
    }
}

@ -34,36 +34,36 @@ public class DatePartProcessor extends BinaryDateTimeProcessor {
    }

    @Override
    protected Object doProcess(Object left, Object right) {
        return process(left, right, zoneId());
    protected Object doProcess(Object part, Object timestamp) {
        return process(part, timestamp, zoneId());
    }

    /**
     * Used in Painless scripting
     */
    public static Object process(Object source1, Object source2, ZoneId zoneId) {
        if (source1 == null || source2 == null) {
    public static Object process(Object part, Object timestamp, ZoneId zoneId) {
        if (part == null || timestamp == null) {
            return null;
        }
        if (source1 instanceof String == false) {
            throw new SqlIllegalArgumentException("A string is required; received [{}]", source1);
        if (part instanceof String == false) {
            throw new SqlIllegalArgumentException("A string is required; received [{}]", part);
        }
        Part datePartField = Part.resolve((String) source1);
        Part datePartField = Part.resolve((String) part);
        if (datePartField == null) {
            List<String> similar = Part.findSimilar((String) source1);
            List<String> similar = Part.findSimilar((String) part);
            if (similar.isEmpty()) {
                throw new SqlIllegalArgumentException("A value of {} or their aliases is required; received [{}]",
                    Part.values(), source1);
                    Part.values(), part);
            } else {
                throw new SqlIllegalArgumentException("Received value [{}] is not valid date part for extraction; " +
                    "did you mean {}?", source1, similar);
                    "did you mean {}?", part, similar);
            }
        }

        if (source2 instanceof ZonedDateTime == false) {
            throw new SqlIllegalArgumentException("A date/datetime is required; received [{}]", source2);
        if (timestamp instanceof ZonedDateTime == false) {
            throw new SqlIllegalArgumentException("A date/datetime is required; received [{}]", timestamp);
        }

        return datePartField.extract(((ZonedDateTime) source2).withZoneSameInstant(zoneId));
        return datePartField.extract(((ZonedDateTime) timestamp).withZoneSameInstant(zoneId));
    }
}

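The static helper shown in the hunk above is also callable directly from Java (it backs the Painless script, as the Javadoc notes). A minimal usage sketch, not part of this change: the wrapper class name is hypothetical, and it assumes the x-pack SQL classes are on the classpath and that "month" resolves to a valid Part value.

import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DatePartProcessor;

import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DatePartUsageSketch {
    public static void main(String[] args) {
        ZonedDateTime ts = ZonedDateTime.parse("2019-09-04T11:22:33Z");
        // Mirrors the renamed signature above: process(part, timestamp, zoneId).
        Object month = DatePartProcessor.process("month", ts, ZoneId.of("UTC"));
        System.out.println(month); // expected: 9, assuming "month" is accepted as a date part
    }
}
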
@ -22,7 +22,13 @@ public abstract class DateTimeHistogramFunction extends DateTimeFunction {
    }

    /**
     * used for aggregration (date histogram)
     * used for aggregation (date histogram)
     */
    public abstract long interval();
    public long fixedInterval() {
        return -1;
    }

    public String calendarInterval() {
        return null;
    }
}

@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.Nullability;
import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.type.DataType;

@ -21,9 +22,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.BinaryDateTimeProcessor.BinaryDateOperation.TRUNC;
import java.util.function.UnaryOperator;

public class DateTrunc extends BinaryDateTimeFunction {

@ -109,10 +108,10 @@ public class DateTrunc extends BinaryDateTimeFunction {
            VALID_VALUES = DateTimeField.initializeValidValues(values());
        }

        private Function<ZonedDateTime, ZonedDateTime> truncateFunction;
        private UnaryOperator<ZonedDateTime> truncateFunction;
        private Set<String> aliases;

        Part(Function<ZonedDateTime, ZonedDateTime> truncateFunction, String... aliases) {
        Part(UnaryOperator<ZonedDateTime> truncateFunction, String... aliases) {
            this.truncateFunction = truncateFunction;
            this.aliases = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(aliases)));
        }

@ -136,7 +135,7 @@ public class DateTrunc extends BinaryDateTimeFunction {
    }

    public DateTrunc(Source source, Expression truncateTo, Expression timestamp, ZoneId zoneId) {
        super(source, truncateTo, timestamp, zoneId, TRUNC);
        super(source, truncateTo, timestamp, zoneId);
    }

    @Override

@ -159,16 +158,6 @@ public class DateTrunc extends BinaryDateTimeFunction {
        return Nullability.TRUE;
    }

    @Override
    protected boolean resolveDateTimeField(String dateTimeField) {
        return Part.resolve(dateTimeField) != null;
    }

    @Override
    protected List<String> findSimilarDateTimeFields(String dateTimeField) {
        return Part.findSimilar(dateTimeField);
    }

    @Override
    protected String scriptMethodName() {
        return "dateTrunc";

@ -179,6 +168,21 @@ public class DateTrunc extends BinaryDateTimeFunction {
        return DateTruncProcessor.process(left().fold(), right().fold(), zoneId());
    }

    @Override
    protected Pipe createPipe(Pipe left, Pipe right, ZoneId zoneId) {
        return new DateTruncPipe(source(), this, left, right, zoneId);
    }

    @Override
    protected boolean resolveDateTimeField(String dateTimeField) {
        return Part.resolve(dateTimeField) != null;
    }

    @Override
    protected List<String> findSimilarDateTimeFields(String dateTimeField) {
        return Part.findSimilar(dateTimeField);
    }

    @Override
    protected List<String> validDateTimeFieldValues() {
        return Part.VALID_VALUES;

@ -0,0 +1,36 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;

import java.time.ZoneId;

public class DateTruncPipe extends BinaryDateTimePipe {

    public DateTruncPipe(Source source, Expression expression, Pipe left, Pipe right, ZoneId zoneId) {
        super(source, expression, left, right, zoneId);
    }

    @Override
    protected NodeInfo<DateTruncPipe> info() {
        return NodeInfo.create(this, DateTruncPipe::new, expression(), left(), right(), zoneId());
    }

    @Override
    protected DateTruncPipe replaceChildren(Pipe left, Pipe right) {
        return new DateTruncPipe(source(), expression(), left, right, zoneId());
    }

    @Override
    protected Processor makeProcessor(Processor left, Processor right, ZoneId zoneId) {
        return new DateTruncProcessor(left, right, zoneId);
    }
}

@ -34,36 +34,36 @@ public class DateTruncProcessor extends BinaryDateTimeProcessor {
    }

    @Override
    protected Object doProcess(Object left, Object right) {
        return process(left, right, zoneId());
    protected Object doProcess(Object truncateTo, Object timestamp) {
        return process(truncateTo, timestamp, zoneId());
    }

    /**
     * Used in Painless scripting
     */
    public static Object process(Object source1, Object source2, ZoneId zoneId) {
        if (source1 == null || source2 == null) {
    public static Object process(Object truncateTo, Object timestamp, ZoneId zoneId) {
        if (truncateTo == null || timestamp == null) {
            return null;
        }
        if (source1 instanceof String == false) {
            throw new SqlIllegalArgumentException("A string is required; received [{}]", source1);
        if (truncateTo instanceof String == false) {
            throw new SqlIllegalArgumentException("A string is required; received [{}]", truncateTo);
        }
        Part truncateDateField = Part.resolve((String) source1);
        Part truncateDateField = Part.resolve((String) truncateTo);
        if (truncateDateField == null) {
            List<String> similar = Part.findSimilar((String) source1);
            List<String> similar = Part.findSimilar((String) truncateTo);
            if (similar.isEmpty()) {
                throw new SqlIllegalArgumentException("A value of {} or their aliases is required; received [{}]",
                    Part.values(), source1);
                    Part.values(), truncateTo);
            } else {
                throw new SqlIllegalArgumentException("Received value [{}] is not valid date part for truncation; " +
                    "did you mean {}?", source1, similar);
                    "did you mean {}?", truncateTo, similar);
            }
        }

        if (source2 instanceof ZonedDateTime == false) {
            throw new SqlIllegalArgumentException("A date/datetime is required; received [{}]", source2);
        if (timestamp instanceof ZonedDateTime == false) {
            throw new SqlIllegalArgumentException("A date/datetime is required; received [{}]", timestamp);
        }

        return truncateDateField.truncate(((ZonedDateTime) source2).withZoneSameInstant(zoneId));
        return truncateDateField.truncate(((ZonedDateTime) timestamp).withZoneSameInstant(zoneId));
    }
}

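The truncation helper follows the same pattern as the extraction one above; a minimal sketch of calling the renamed method, under the same assumptions (hypothetical wrapper class, x-pack SQL classes on the classpath, "year" accepted as a Part value).

import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTruncProcessor;

import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DateTruncUsageSketch {
    public static void main(String[] args) {
        ZonedDateTime ts = ZonedDateTime.parse("2019-09-04T11:22:33Z");
        // process(truncateTo, timestamp, zoneId), as renamed in the hunk above.
        Object startOfYear = DateTruncProcessor.process("year", ts, ZoneId.of("UTC"));
        System.out.println(startOfYear); // expected: the first instant of 2019 in UTC, assuming "year" is accepted
    }
}
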
@ -5,20 +5,20 @@
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;

import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2;
import org.elasticsearch.xpack.sql.tree.Source;

import java.time.ZoneId;
import java.util.concurrent.TimeUnit;

/**
 * Extract the year from a datetime.
 */
public class Year extends DateTimeHistogramFunction {

    private static long YEAR_IN_MILLIS = TimeUnit.DAYS.toMillis(1) * 365L;
    public static String YEAR_INTERVAL = DateHistogramInterval.YEAR.toString();

    public Year(Source source, Expression field, ZoneId zoneId) {
        super(source, field, zoneId, DateTimeExtractor.YEAR);

@ -45,7 +45,7 @@ public class Year extends DateTimeHistogramFunction {
    }

    @Override
    public long interval() {
        return YEAR_IN_MILLIS;
    public String calendarInterval() {
        return YEAR_INTERVAL;
    }
}

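For context on the switch above from a fixed YEAR_IN_MILLIS to DateHistogramInterval.YEAR: a calendar year is not a fixed number of milliseconds, so 365-day buckets drift across leap years, which is also why the histogram results earlier in this diff now land on January 1 boundaries. A standalone java.time sketch (not part of this change) illustrating the drift:

import java.time.Duration;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

public class CalendarVsFixedYear {
    public static void main(String[] args) {
        ZonedDateTime start = ZonedDateTime.of(1952, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
        Duration fixedYear = Duration.ofDays(365); // the assumption behind the removed YEAR_IN_MILLIS

        // Ten fixed 365-day steps versus ten calendar years from the same start.
        ZonedDateTime afterFixed = start.plus(fixedYear.multipliedBy(10));
        ZonedDateTime afterCalendar = start.plusYears(10);

        System.out.println(afterFixed);    // 1961-12-29T00:00Z (three leap days behind the calendar)
        System.out.println(afterCalendar); // 1962-01-01T00:00Z (stays on the calendar boundary)
    }
}
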
@ -11,7 +11,6 @@ import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Bina
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypeConversion;
import org.elasticsearch.xpack.sql.type.DataTypes;

import static org.elasticsearch.common.logging.LoggerMessageFormat.format;

@ -41,7 +40,7 @@ abstract class DateTimeArithmeticOperation extends ArithmeticOperation {
            return TypeResolution.TYPE_RESOLVED;
        }
        // 2. 3. 4. intervals
        if ((DataTypes.isInterval(l) || DataTypes.isInterval(r))) {
        if (l.isInterval() || r.isInterval()) {
            if (DataTypeConversion.commonType(l, r) == null) {
                return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r));
            } else {

@ -57,7 +56,7 @@ abstract class DateTimeArithmeticOperation extends ArithmeticOperation {
        DataType l = left().dataType();
        DataType r = right().dataType();

        if (!(r.isDateOrTimeBased() || DataTypes.isInterval(r))|| !(l.isDateOrTimeBased() || DataTypes.isInterval(l))) {
        if (!(r.isDateOrTimeBased() || r.isInterval())|| !(l.isDateOrTimeBased() || l.isInterval())) {
            return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r));
        }
        return TypeResolution.TYPE_RESOLVED;

@ -7,10 +7,9 @@ package org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation;
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.tree.Source;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypes;

import static org.elasticsearch.common.logging.LoggerMessageFormat.format;

@ -39,10 +38,10 @@ public class Mul extends ArithmeticOperation {
            return TypeResolution.TYPE_RESOLVED;
        }

        if (DataTypes.isInterval(l) && r.isInteger()) {
        if (l.isInterval() && r.isInteger()) {
            dataType = l;
            return TypeResolution.TYPE_RESOLVED;
        } else if (DataTypes.isInterval(r) && l.isInteger()) {
        } else if (r.isInterval() && l.isInteger()) {
            dataType = r;
            return TypeResolution.TYPE_RESOLVED;
        }