Merge branch '7.x' into enrich-7.x

commit 323251c3d1

 build.gradle | 15
@@ -619,21 +619,6 @@ allprojects {
     }
   }

-  subprojects {
-    // Common config when running with a FIPS-140 runtime JVM
-    if (project.ext.has("inFipsJvm") && project.ext.inFipsJvm) {
-      tasks.withType(Test) {
-        systemProperty 'javax.net.ssl.trustStorePassword', 'password'
-        systemProperty 'javax.net.ssl.keyStorePassword', 'password'
-      }
-      project.pluginManager.withPlugin("elasticsearch.testclusters") {
-        project.testClusters.all {
-          systemProperty 'javax.net.ssl.trustStorePassword', 'password'
-          systemProperty 'javax.net.ssl.keyStorePassword', 'password'
-        }
-      }
-    }
-  }


@@ -116,6 +116,22 @@ class BuildPlugin implements Plugin<Project> {
         configureTestTasks(project)
         configurePrecommit(project)
         configureDependenciesInfo(project)
+
+        // Common config when running with a FIPS-140 runtime JVM
+        // Need to do it here to support external plugins
+        if (project.ext.inFipsJvm) {
+            project.tasks.withType(Test) {
+                systemProperty 'javax.net.ssl.trustStorePassword', 'password'
+                systemProperty 'javax.net.ssl.keyStorePassword', 'password'
+            }
+            project.pluginManager.withPlugin("elasticsearch.testclusters") {
+                project.testClusters.all {
+                    systemProperty 'javax.net.ssl.trustStorePassword', 'password'
+                    systemProperty 'javax.net.ssl.keyStorePassword', 'password'
+                }
+            }
+        }
+
     }

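For context on what the FIPS block above feeds into: the two system properties are read back inside the test JVM. The sketch below is not part of the commit — the class name and the /test.truststore resource path are invented — and only illustrates one way a test could consume the injected trust store password under those assumptions.

import java.io.InputStream;
import java.security.KeyStore;

// Hypothetical helper, not from this commit: shows how a test might read the
// 'javax.net.ssl.trustStorePassword' property that the Gradle config above sets.
public final class FipsTestTrustStore {

    public static KeyStore load() throws Exception {
        // Falls back to the same value the build injects for FIPS-140 runtime JVMs.
        char[] password = System.getProperty("javax.net.ssl.trustStorePassword", "password").toCharArray();
        KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
        // '/test.truststore' is an assumed classpath resource; load(null, password) would create an empty store.
        try (InputStream in = FipsTestTrustStore.class.getResourceAsStream("/test.truststore")) {
            trustStore.load(in, password);
        }
        return trustStore;
    }
}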
@@ -21,7 +21,6 @@ package org.elasticsearch.gradle;

 import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
 import org.gradle.testkit.runner.BuildResult;
-import org.gradle.testkit.runner.GradleRunner;


 public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTestCase {
@@ -29,25 +28,19 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
     public static final String PROJECT_NAME = "elasticsearch-build-resources";

     public void testUpToDateWithSourcesConfigured() {
-        GradleRunner.create()
-            .withProjectDir(getProjectDir(PROJECT_NAME))
+        getGradleRunner(PROJECT_NAME)
             .withArguments("clean", "-s")
-            .withPluginClasspath()
             .build();

-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir(PROJECT_NAME))
+        BuildResult result = getGradleRunner(PROJECT_NAME)
             .withArguments("buildResources", "-s", "-i")
-            .withPluginClasspath()
             .build();
         assertTaskSuccessful(result, ":buildResources");
         assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml");
         assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml");

-        result = GradleRunner.create()
-            .withProjectDir(getProjectDir(PROJECT_NAME))
+        result = getGradleRunner(PROJECT_NAME)
             .withArguments("buildResources", "-s", "-i")
-            .withPluginClasspath()
             .build();
         assertTaskUpToDate(result, ":buildResources");
         assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml");
@@ -55,10 +48,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
     }

     public void testImplicitTaskDependencyCopy() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir(PROJECT_NAME))
+        BuildResult result = getGradleRunner(PROJECT_NAME)
             .withArguments("clean", "sampleCopyAll", "-s", "-i")
-            .withPluginClasspath()
             .build();

         assertTaskSuccessful(result, ":buildResources");
@@ -69,10 +60,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
     }

     public void testImplicitTaskDependencyInputFileOfOther() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir(PROJECT_NAME))
+        BuildResult result = getGradleRunner(PROJECT_NAME)
             .withArguments("clean", "sample", "-s", "-i")
-            .withPluginClasspath()
             .build();

         assertTaskSuccessful(result, ":sample");
@@ -81,11 +70,12 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
     }

     public void testIncorrectUsage() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir(PROJECT_NAME))
+        assertOutputContains(
+            getGradleRunner(PROJECT_NAME)
                 .withArguments("noConfigAfterExecution", "-s", "-i")
-            .withPluginClasspath()
-            .buildAndFail();
-        assertOutputContains("buildResources can't be configured after the task ran");
+                .buildAndFail()
+                .getOutput(),
+            "buildResources can't be configured after the task ran"
+        );
     }
 }
@@ -2,7 +2,6 @@ package org.elasticsearch.gradle.precommit;

 import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
 import org.gradle.testkit.runner.BuildResult;
-import org.gradle.testkit.runner.GradleRunner;

 /*
  * Licensed to Elasticsearch under one or more contributor
@@ -25,10 +24,8 @@ import org.gradle.testkit.runner.GradleRunner;
 public class JarHellTaskIT extends GradleIntegrationTestCase {

     public void testJarHellDetected() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir("jarHell"))
+        BuildResult result = getGradleRunner("jarHell")
             .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath())
-            .withPluginClasspath()
             .buildAndFail();

         assertTaskFailed(result, ":jarHell");
@@ -4,8 +4,12 @@ import org.gradle.testkit.runner.BuildResult;
 import org.gradle.testkit.runner.BuildTask;
 import org.gradle.testkit.runner.GradleRunner;
 import org.gradle.testkit.runner.TaskOutcome;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;

 import java.io.File;
+import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.List;
@@ -16,6 +20,9 @@ import java.util.stream.Stream;

 public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {

+    @Rule
+    public TemporaryFolder testkitTmpDir = new TemporaryFolder();
+
     protected File getProjectDir(String name) {
         File root = new File("src/testKit/");
         if (root.exists() == false) {
@@ -26,9 +33,16 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
     }

     protected GradleRunner getGradleRunner(String sampleProject) {
+        File testkit;
+        try {
+            testkit = testkitTmpDir.newFolder();
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
         return GradleRunner.create()
             .withProjectDir(getProjectDir(sampleProject))
-            .withPluginClasspath();
+            .withPluginClasspath()
+            .withTestKitDir(testkit);
     }

     protected File getBuildDir(String name) {
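The helper change above centralizes runner construction. A minimal sketch of how a test is expected to use it after this commit (the test itself is hypothetical; "testclusters" is one of the sample projects under src/testKit/ referenced elsewhere in this diff, and ":help" is the standard Gradle help task):

import org.gradle.testkit.runner.BuildResult;

// Hypothetical test, not part of the commit: project dir, plugin classpath and an
// isolated TestKit directory all come from getGradleRunner(), so the test only
// supplies its arguments.
public class SampleBuildIT extends GradleIntegrationTestCase {

    public void testHelpTaskRuns() {
        BuildResult result = getGradleRunner("testclusters")
            .withArguments("help", "-s")
            .build();
        assertTaskSuccessful(result, ":help");
    }
}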
@@ -21,12 +21,21 @@ package org.elasticsearch.gradle.testclusters;
 import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
 import org.gradle.testkit.runner.BuildResult;
 import org.gradle.testkit.runner.GradleRunner;
+import org.junit.Before;
 import org.junit.Ignore;


 import java.util.Arrays;

 public class TestClustersPluginIT extends GradleIntegrationTestCase {

+    private GradleRunner runner;
+
+    @Before
+    public void setUp() throws Exception {
+        runner = getGradleRunner("testclusters");
+    }
+
     public void testListClusters() {
         BuildResult result = getTestClustersRunner("listTestClusters").build();

@@ -190,10 +199,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
         arguments[tasks.length] = "-s";
         arguments[tasks.length + 1] = "-i";
         arguments[tasks.length + 2] = "-Dlocal.repo.path=" + getLocalTestRepoPath();
-        return GradleRunner.create()
-            .withProjectDir(getProjectDir("testclusters"))
-            .withArguments(arguments)
-            .withPluginClasspath();
+        return runner.withArguments(arguments);
     }

     private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) {
@@ -21,7 +21,7 @@ slf4j = 1.6.2
 jna = 4.5.1

 netty = 4.1.35.Final
-joda = 2.10.1
+joda = 2.10.2

 # when updating this version, you need to ensure compatibility with:
 # - plugins/ingest-attachment (transitive dependency, check the upstream POM)
@@ -22,6 +22,7 @@ import org.elasticsearch.client.Validatable;
 import org.elasticsearch.client.ValidationException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -30,8 +31,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter
 import org.joda.time.DateTimeZone;

 import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.Objects;
 import java.util.Optional;
+import java.util.Set;

 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
@@ -59,13 +63,62 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject {
     private static final String TIME_ZONE = "time_zone";
     private static final String DELAY = "delay";
     private static final String DEFAULT_TIMEZONE = "UTC";
+    private static final String CALENDAR_INTERVAL = "calendar_interval";
+    private static final String FIXED_INTERVAL = "fixed_interval";
+
+    // From DateHistogramAggregationBuilder in core, transplanted and modified to a set
+    // so we don't need to import a dependency on the class
+    private static final Set<String> DATE_FIELD_UNITS;
+    static {
+        Set<String> dateFieldUnits = new HashSet<>();
+        dateFieldUnits.add("year");
+        dateFieldUnits.add("1y");
+        dateFieldUnits.add("quarter");
+        dateFieldUnits.add("1q");
+        dateFieldUnits.add("month");
+        dateFieldUnits.add("1M");
+        dateFieldUnits.add("week");
+        dateFieldUnits.add("1w");
+        dateFieldUnits.add("day");
+        dateFieldUnits.add("1d");
+        dateFieldUnits.add("hour");
+        dateFieldUnits.add("1h");
+        dateFieldUnits.add("minute");
+        dateFieldUnits.add("1m");
+        dateFieldUnits.add("second");
+        dateFieldUnits.add("1s");
+        DATE_FIELD_UNITS = Collections.unmodifiableSet(dateFieldUnits);
+    }
+
     private static final ConstructingObjectParser<DateHistogramGroupConfig, Void> PARSER;
     static {
-        PARSER = new ConstructingObjectParser<>(NAME, true, a ->
-            new DateHistogramGroupConfig((String) a[0], (DateHistogramInterval) a[1], (DateHistogramInterval) a[2], (String) a[3]));
+        PARSER = new ConstructingObjectParser<>(NAME, true, a -> {
+            DateHistogramInterval oldInterval = (DateHistogramInterval) a[1];
+            DateHistogramInterval calendarInterval = (DateHistogramInterval) a[2];
+            DateHistogramInterval fixedInterval = (DateHistogramInterval) a[3];
+
+            if (oldInterval != null) {
+                if (calendarInterval != null || fixedInterval != null) {
+                    throw new IllegalArgumentException("Cannot use [interval] with [fixed_interval] or [calendar_interval] " +
+                        "configuration options.");
+                }
+                return new DateHistogramGroupConfig((String) a[0], oldInterval, (DateHistogramInterval) a[4], (String) a[5]);
+            } else if (calendarInterval != null && fixedInterval == null) {
+                return new CalendarInterval((String) a[0], calendarInterval, (DateHistogramInterval) a[4], (String) a[5]);
+            } else if (calendarInterval == null && fixedInterval != null) {
+                return new FixedInterval((String) a[0], fixedInterval, (DateHistogramInterval) a[4], (String) a[5]);
+            } else if (calendarInterval != null && fixedInterval != null) {
+                throw new IllegalArgumentException("Cannot set both [fixed_interval] and [calendar_interval] at the same time");
+            } else {
+                throw new IllegalArgumentException("An interval is required. Use [fixed_interval] or [calendar_interval].");
+            }
+        });
         PARSER.declareString(constructorArg(), new ParseField(FIELD));
-        PARSER.declareField(constructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING);
+        PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING);
+        PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()),
+            new ParseField(CALENDAR_INTERVAL), ValueType.STRING);
+        PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()),
+            new ParseField(FIXED_INTERVAL), ValueType.STRING);
         PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(DELAY), ValueType.STRING);
         PARSER.declareString(optionalConstructorArg(), new ParseField(TIME_ZONE));
     }
@@ -76,8 +129,57 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject {
     private final String timeZone;

     /**
-     * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters.
+     * FixedInterval is a {@link DateHistogramGroupConfig} that uses a fixed time interval for rolling up data.
+     * The fixed time interval is one or multiples of SI units and has no calendar-awareness (e.g. doesn't account
+     * for leap corrections, does not have variable length months, etc).
+     *
+     * For calendar-aware rollups, use {@link CalendarInterval}
      */
+    public static class FixedInterval extends DateHistogramGroupConfig {
+        public FixedInterval(String field, DateHistogramInterval interval) {
+            this(field, interval, null, null);
+        }
+
+        public FixedInterval(String field, DateHistogramInterval interval, DateHistogramInterval delay, String timeZone) {
+            super(field, interval, delay, timeZone);
+            // validate fixed time
+            TimeValue.parseTimeValue(interval.toString(), NAME + ".FixedInterval");
+        }
+    }
+
+    /**
+     * CalendarInterval is a {@link DateHistogramGroupConfig} that uses calendar-aware intervals for rolling up data.
+     * Calendar time intervals understand leap corrections and contextual differences in certain calendar units (e.g.
+     * months are variable length depending on the month). Calendar units are only available in singular quantities:
+     * 1s, 1m, 1h, 1d, 1w, 1q, 1M, 1y
+     *
+     * For fixed time rollups, use {@link FixedInterval}
+     */
+    public static class CalendarInterval extends DateHistogramGroupConfig {
+        public CalendarInterval(String field, DateHistogramInterval interval) {
+            this(field, interval, null, null);
+        }
+
+        public CalendarInterval(String field, DateHistogramInterval interval, DateHistogramInterval delay, String timeZone) {
+            super(field, interval, delay, timeZone);
+            if (DATE_FIELD_UNITS.contains(interval.toString()) == false) {
+                throw new IllegalArgumentException("The supplied interval [" + interval + "] could not be parsed " +
+                    "as a calendar interval.");
+            }
+        }
+    }
+
+    /**
+     * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters.
+     *
+     * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval}
+     * or {@link DateHistogramGroupConfig.FixedInterval} instead
+     *
+     * @since 7.2.0
+     */
+    @Deprecated
     public DateHistogramGroupConfig(final String field, final DateHistogramInterval interval) {
         this(field, interval, null, null);
     }
@@ -90,12 +192,17 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject {
      * The {@code timeZone} is optional and can be set to {@code null}. When configured, the time zone value is resolved using
      * ({@link DateTimeZone#forID(String)} and must match a time zone identifier provided by the Joda Time library.
      * </p>
-     *
      * @param field the name of the date field to use for the date histogram (required)
     * @param interval the interval to use for the date histogram (required)
     * @param delay the time delay (optional)
     * @param timeZone the id of time zone to use to calculate the date histogram (optional). When {@code null}, the UTC timezone is used.
+     *
+     * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval}
+     * or {@link DateHistogramGroupConfig.FixedInterval} instead
+     *
+     * @since 7.2.0
      */
+    @Deprecated
     public DateHistogramGroupConfig(final String field,
                                     final DateHistogramInterval interval,
                                     final @Nullable DateHistogramInterval delay,
@@ -153,7 +260,13 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject {
     public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
         builder.startObject();
         {
+            if (this.getClass().equals(CalendarInterval.class)) {
+                builder.field(CALENDAR_INTERVAL, interval.toString());
+            } else if (this.getClass().equals(FixedInterval.class)) {
+                builder.field(FIXED_INTERVAL, interval.toString());
+            } else {
                 builder.field(INTERVAL, interval.toString());
+            }
             builder.field(FIELD, field);
             if (delay != null) {
                 builder.field(DELAY, delay.toString());
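A minimal sketch of how callers are expected to choose between the two new subclasses introduced above (field name and interval values are invented for illustration; the config class is the one changed in this hunk):

import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

// Illustrative only: "timestamp", "1d" and "90m" are example values.
public class RollupIntervalExamples {

    static DateHistogramGroupConfig calendarDaily() {
        // Calendar-aware: the unit must be one of the singular DATE_FIELD_UNITS ("1d", "1M", ...).
        return new DateHistogramGroupConfig.CalendarInterval("timestamp", new DateHistogramInterval("1d"));
    }

    static DateHistogramGroupConfig fixedNinetyMinutes() {
        // Fixed: any value accepted by TimeValue.parseTimeValue(), including multiples such as "90m".
        return new DateHistogramGroupConfig.FixedInterval("timestamp", new DateHistogramInterval("90m"));
    }
}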
@@ -72,6 +72,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.oneOf;

 public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {

@@ -141,7 +142,8 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
     @After
     public void cleanUpTransforms() throws IOException {
         for (String transformId : transformsToClean) {
-            highLevelClient().dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(transformId), RequestOptions.DEFAULT);
+            highLevelClient().dataFrame().stopDataFrameTransform(
+                    new StopDataFrameTransformRequest(transformId, Boolean.TRUE, null), RequestOptions.DEFAULT);
         }

         for (String transformId : transformsToClean) {
@@ -263,9 +265,10 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase {
         GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id),
                 client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync);
         assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1));
-        assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState());
+        IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState();
+        assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING)));

-        StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id);
+        StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null);
         StopDataFrameTransformResponse stopResponse =
                 execute(stopRequest, client::stopDataFrameTransform, client::stopDataFrameTransformAsync);
         assertTrue(stopResponse.isStopped());
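Both cleanup paths above now pass three arguments to StopDataFrameTransformRequest. Judging from the call sites in this diff, the second argument is a wait-for-completion flag and the third an optional timeout; below is a small sketch under that assumption (the transform id and timeout are invented, and the import path for the request class is assumed):

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; // package assumed
import org.elasticsearch.common.unit.TimeValue;

// Sketch, not from the commit: argument order (id, waitForCompletion, timeout) is
// inferred from the calls in the tests above.
class StopTransformExample {

    static void stop(RestHighLevelClient client, String transformId) throws Exception {
        StopDataFrameTransformRequest request =
            new StopDataFrameTransformRequest(transformId, Boolean.TRUE, TimeValue.timeValueSeconds(30));
        client.dataFrame().stopDataFrameTransform(request, RequestOptions.DEFAULT);
    }
}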
@@ -152,7 +152,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase {


     public void testDeleteRollupJob() throws Exception {
-        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY));
+        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY));
         final List<MetricConfig> metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS));
         final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600));
         PutRollupJobRequest putRollupJobRequest =
@@ -174,7 +174,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase {

     public void testPutStartAndGetRollupJob() throws Exception {
         // TODO expand this to also test with histogram and terms?
-        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY));
+        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY));
         final List<MetricConfig> metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS));
         final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600));

@@ -334,7 +334,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase {
         final String cron = "*/1 * * * * ?";
         final int pageSize = randomIntBetween(numDocs, numDocs * 10);
         // TODO expand this to also test with histogram and terms?
-        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY));
+        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY));
         final List<MetricConfig> metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS));
         final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600));

@@ -378,7 +378,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase {
             case "delay":
                 assertThat(entry.getValue(), equalTo("foo"));
                 break;
-            case "interval":
+            case "calendar_interval":
                 assertThat(entry.getValue(), equalTo("1d"));
                 break;
             case "time_zone":
@@ -446,7 +446,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase {
         final String cron = "*/1 * * * * ?";
         final int pageSize = randomIntBetween(numDocs, numDocs * 10);
         // TODO expand this to also test with histogram and terms?
-        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY));
+        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY));
         final List<MetricConfig> metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS));
         final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600));

@@ -490,7 +490,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase {
             case "delay":
                 assertThat(entry.getValue(), equalTo("foo"));
                 break;
-            case "interval":
+            case "calendar_interval":
                 assertThat(entry.getValue(), equalTo("1d"));
                 break;
             case "time_zone":
@@ -76,7 +76,8 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest
     @After
     public void cleanUpTransforms() throws IOException {
         for (String transformId : transformsToClean) {
-            highLevelClient().dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(transformId), RequestOptions.DEFAULT);
+            highLevelClient().dataFrame().stopDataFrameTransform(
+                    new StopDataFrameTransformRequest(transformId, Boolean.TRUE, TimeValue.timeValueSeconds(20)), RequestOptions.DEFAULT);
         }

         for (String transformId : transformsToClean) {
@@ -399,8 +399,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
     public void testGetRollupCaps() throws Exception {
         RestHighLevelClient client = highLevelClient();

-        DateHistogramGroupConfig dateHistogram =
-            new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
+        DateHistogramGroupConfig dateHistogram = new DateHistogramGroupConfig.FixedInterval(
+            "timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
         TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter");
         HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out");
         GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms);
@@ -473,7 +473,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
         // item represents a different aggregation that can be run against the "timestamp"
         // field, and any additional details specific to that agg (interval, etc)
         List<Map<String, Object>> timestampCaps = fieldCaps.get("timestamp").getAggs();
-        assert timestampCaps.get(0).toString().equals("{agg=date_histogram, delay=7d, interval=1h, time_zone=UTC}");
+        logger.error(timestampCaps.get(0).toString());
+        assert timestampCaps.get(0).toString().equals("{agg=date_histogram, fixed_interval=1h, delay=7d, time_zone=UTC}");

         // In contrast to the timestamp field, the temperature field has multiple aggs configured
         List<Map<String, Object>> temperatureCaps = fieldCaps.get("temperature").getAggs();
@@ -515,8 +516,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
     public void testGetRollupIndexCaps() throws Exception {
         RestHighLevelClient client = highLevelClient();

-        DateHistogramGroupConfig dateHistogram =
-            new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
+        DateHistogramGroupConfig dateHistogram = new DateHistogramGroupConfig.FixedInterval(
+            "timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
         TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter");
         HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out");
         GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms);
@@ -587,7 +588,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
         // item represents a different aggregation that can be run against the "timestamp"
         // field, and any additional details specific to that agg (interval, etc)
         List<Map<String, Object>> timestampCaps = fieldCaps.get("timestamp").getAggs();
-        assert timestampCaps.get(0).toString().equals("{agg=date_histogram, delay=7d, interval=1h, time_zone=UTC}");
+        logger.error(timestampCaps.get(0).toString());
+        assert timestampCaps.get(0).toString().equals("{agg=date_histogram, fixed_interval=1h, delay=7d, time_zone=UTC}");

         // In contrast to the timestamp field, the temperature field has multiple aggs configured
         List<Map<String, Object>> temperatureCaps = fieldCaps.get("temperature").getAggs();
@@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
 import org.elasticsearch.test.AbstractXContentTestCase;
@@ -79,7 +80,7 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
         aggHistogramInterval = aggHistogramInterval <= 0 ? 1 : aggHistogramInterval;
         MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
         aggs.addAggregator(AggregationBuilders.dateHistogram("buckets")
-            .interval(aggHistogramInterval).subAggregation(maxTime).field("time"));
+            .fixedInterval(new DateHistogramInterval(aggHistogramInterval + "ms")).subAggregation(maxTime).field("time"));
         try {
             builder.setAggregations(aggs);
         } catch (IOException e) {
@@ -44,7 +44,7 @@ public class GetRollupJobResponseTests extends ESTestCase {
                 this::createTestInstance,
                 this::toXContent,
                 GetRollupJobResponse::fromXContent)
-                .supportsUnknownFields(true)
+                .supportsUnknownFields(false)
                 .randomFieldsExcludeFilter(field ->
                         field.endsWith("status.current_position"))
                 .test();
@@ -49,7 +49,7 @@ public class PutRollupJobRequestTests extends AbstractXContentTestCase<PutRollup

     @Override
     protected boolean supportsUnknownFields() {
-        return true;
+        return false;
     }

     public void testRequireConfiguration() {
@@ -90,9 +90,21 @@ public class DateHistogramGroupConfigTests extends AbstractXContentTestCase<Date

     static DateHistogramGroupConfig randomDateHistogramGroupConfig() {
         final String field = randomAlphaOfLength(randomIntBetween(3, 10));
-        final DateHistogramInterval interval = new DateHistogramInterval(randomPositiveTimeValue());
         final DateHistogramInterval delay = randomBoolean() ? new DateHistogramInterval(randomPositiveTimeValue()) : null;
         final String timezone = randomBoolean() ? randomDateTimeZone().toString() : null;
+        int i = randomIntBetween(0,2);
+        final DateHistogramInterval interval;
+        switch (i) {
+            case 0:
+                interval = new DateHistogramInterval(randomPositiveTimeValue());
+                return new DateHistogramGroupConfig.FixedInterval(field, interval, delay, timezone);
+            case 1:
+                interval = new DateHistogramInterval(randomTimeValue(1,1, "m", "h", "d", "w"));
+                return new DateHistogramGroupConfig.CalendarInterval(field, interval, delay, timezone);
+            default:
+                interval = new DateHistogramInterval(randomPositiveTimeValue());
                 return new DateHistogramGroupConfig(field, interval, delay, timezone);
+        }
     }

 }
@@ -38,8 +38,10 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.file.Files;
 import java.nio.file.Paths;
+import java.security.AccessController;
 import java.security.KeyFactory;
 import java.security.KeyStore;
+import java.security.PrivilegedAction;
 import java.security.cert.Certificate;
 import java.security.cert.CertificateFactory;
 import java.security.spec.PKCS8EncodedKeySpec;
@@ -106,7 +108,7 @@ public class RestClientBuilderIntegTests extends RestClientTestCase {
     }

     private static SSLContext getSslContext() throws Exception {
-        SSLContext sslContext = SSLContext.getInstance("TLS");
+        SSLContext sslContext = SSLContext.getInstance(getProtocol());
         try (InputStream certFile = RestClientBuilderIntegTests.class.getResourceAsStream("/test.crt")) {
             // Build a keystore of default type programmatically since we can't use JKS keystores to
             // init a KeyManagerFactory in FIPS 140 JVMs.
@@ -126,4 +128,37 @@ public class RestClientBuilderIntegTests extends RestClientTestCase {
         }
         return sslContext;
     }
+
+    /**
+     * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK that supports TLSv1.3 prior to
+     * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK.
+     */
+    private static String getProtocol() {
+        String version = AccessController.doPrivileged((PrivilegedAction<String>) () -> System.getProperty("java.version"));
+        String[] components = version.split("\\.");
+        if (components.length > 0) {
+            final int major = Integer.valueOf(components[0]);
+            if (major < 11) {
+                return "TLS";
+            } if (major > 12) {
+                return "TLS";
+            } else if (major == 12 && components.length > 2) {
+                final int minor = Integer.valueOf(components[1]);
+                if (minor > 0) {
+                    return "TLS";
+                } else {
+                    String patch = components[2];
+                    final int index = patch.indexOf("_");
+                    if (index > -1) {
+                        patch = patch.substring(0, index);
+                    }
+
+                    if (Integer.valueOf(patch) >= 1) {
+                        return "TLS";
+                    }
+                }
+            }
+        }
+        return "TLSv1.2";
+    }
 }
@@ -614,7 +614,7 @@ buildRestTests.setups['sensor_rollup_job'] = '''
         "groups" : {
           "date_histogram": {
             "field": "timestamp",
-            "interval": "1h",
+            "fixed_interval": "1h",
             "delay": "7d"
           },
           "terms": {
@@ -683,7 +683,7 @@ buildRestTests.setups['sensor_started_rollup_job'] = '''
         "groups" : {
          "date_histogram": {
            "field": "timestamp",
-            "interval": "1h",
+            "fixed_interval": "1h",
            "delay": "7d"
          },
          "terms": {
@@ -800,7 +800,7 @@ buildRestTests.setups['sensor_prefab_data'] = '''
         date_histogram:
           delay: "7d"
           field: "timestamp"
-          interval: "60m"
+          fixed_interval: "60m"
           time_zone: "UTC"
         terms:
           fields:
@@ -16,7 +16,7 @@ AggregationBuilder aggregation =
         AggregationBuilders
                 .dateHistogram("agg")
                 .field("dateOfBirth")
-                .dateHistogramInterval(DateHistogramInterval.YEAR);
+                .calendarInterval(DateHistogramInterval.YEAR);
 --------------------------------------------------

 Or if you want to set an interval of 10 days:
@@ -27,7 +27,7 @@ AggregationBuilder aggregation =
         AggregationBuilders
                 .dateHistogram("agg")
                 .field("dateOfBirth")
-                .dateHistogramInterval(DateHistogramInterval.days(10));
+                .fixedInterval(DateHistogramInterval.days(10));
 --------------------------------------------------


@@ -47,7 +47,7 @@ SearchResponse sr = node.client().prepareSearch()
         AggregationBuilders.terms("by_country").field("country")
                 .subAggregation(AggregationBuilders.dateHistogram("by_year")
                         .field("dateOfBirth")
-                        .dateHistogramInterval(DateHistogramInterval.YEAR)
+                        .calendarInterval(DateHistogramInterval.YEAR)
                         .subAggregation(AggregationBuilders.avg("avg_children").field("children"))
                 )
         )
@@ -109,7 +109,7 @@ SearchResponse sr = client.prepareSearch()
     .addAggregation(
             AggregationBuilders.dateHistogram("agg2")
                     .field("birth")
-                    .dateHistogramInterval(DateHistogramInterval.YEAR)
+                    .calendarInterval(DateHistogramInterval.YEAR)
     )
     .get();

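The documentation edits above replace dateHistogramInterval() with the two explicit builder methods. A compact sketch showing both side by side (field and aggregation names follow the surrounding examples; this is an illustration, not an excerpt from the docs):

import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

class DateHistogramIntervalExamples {

    // Calendar-aware bucketing: singular units such as YEAR, MONTH, DAY.
    static AggregationBuilder perYear() {
        return AggregationBuilders.dateHistogram("agg")
            .field("dateOfBirth")
            .calendarInterval(DateHistogramInterval.YEAR);
    }

    // Fixed-length bucketing: arbitrary multiples of SI units, e.g. ten days.
    static AggregationBuilder perTenDays() {
        return AggregationBuilders.dateHistogram("agg")
            .field("dateOfBirth")
            .fixedInterval(DateHistogramInterval.days(10));
    }
}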
@@ -54,7 +54,7 @@ Using the REST API, we could define this grouping configuration:
     "groups" : {
       "date_histogram": {
         "field": "timestamp",
-        "interval": "1h",
+        "calendar_interval": "1h",
         "delay": "7d",
         "time_zone": "UTC"
       },
@@ -226,7 +226,7 @@ GET /_search
         "my_buckets": {
             "composite" : {
                 "sources" : [
-                    { "date": { "date_histogram" : { "field": "timestamp", "interval": "1d" } } }
+                    { "date": { "date_histogram" : { "field": "timestamp", "calendar_interval": "1d" } } }
                 ]
             }
         }
@@ -260,7 +260,7 @@ GET /_search
                 "date": {
                     "date_histogram" : {
                         "field": "timestamp",
-                        "interval": "1d",
+                        "calendar_interval": "1d",
                         "format": "yyyy-MM-dd" <1>
                     }
                 }
@@ -299,7 +299,7 @@ GET /_search
         "my_buckets": {
             "composite" : {
                 "sources" : [
-                    { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } },
+                    { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } },
                     { "product": { "terms": {"field": "product" } } }
                 ]
             }
@@ -324,7 +324,7 @@ GET /_search
                 "sources" : [
                     { "shop": { "terms": {"field": "shop" } } },
                     { "product": { "terms": { "field": "product" } } },
-                    { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } }
+                    { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } }
                 ]
             }
         }
@@ -352,7 +352,7 @@ GET /_search
         "my_buckets": {
             "composite" : {
                 "sources" : [
-                    { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } },
+                    { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } },
                     { "product": { "terms": {"field": "product", "order": "asc" } } }
                 ]
             }
@@ -420,7 +420,7 @@ GET /_search
             "composite" : {
                 "size": 2,
                 "sources" : [
-                    { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } },
+                    { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } },
                     { "product": { "terms": {"field": "product" } } }
                 ]
             }
@@ -486,7 +486,7 @@ GET /_search
             "composite" : {
                 "size": 2,
                 "sources" : [
-                    { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } },
+                    { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } },
                     { "product": { "terms": {"field": "product", "order": "asc" } } }
                 ],
                 "after": { "date": 1494288000000, "product": "mad max" } <1>
@@ -515,7 +515,7 @@ GET /_search
         "my_buckets": {
             "composite" : {
                 "sources" : [
-                    { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } },
+                    { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } },
                     { "product": { "terms": {"field": "product" } } }
                 ]
             },
@@ -10,102 +10,252 @@ that here the interval can be specified using date/time expressions. Time-based
 data requires special support because time-based intervals are not always a
 fixed length.

-==== Setting intervals
+==== Calendar and Fixed intervals

-There seems to be no limit to the creativity we humans apply to setting our
-clocks and calendars. We've invented leap years and leap seconds, standard and
-daylight savings times, and timezone offsets of 30 or 45 minutes rather than a
-full hour. While these creations help keep us in sync with the cosmos and our
-environment, they can make specifying time intervals accurately a real challenge.
-The only universal truth our researchers have yet to disprove is that a
-millisecond is always the same duration, and a second is always 1000 milliseconds.
-Beyond that, things get complicated.
+When configuring a date histogram aggregation, the interval can be specified
+in two manners: calendar-aware time intervals, and fixed time intervals.

-Generally speaking, when you specify a single time unit, such as 1 hour or 1 day, you
-are working with a _calendar interval_, but multiples, such as 6 hours or 3 days, are
-_fixed-length intervals_.
+Calendar-aware intervals understand that daylight savings changes the length
+of specific days, months have different amounts of days, and leap seconds can
+be tacked onto a particular year.

-For example, a specification of 1 day (1d) from now is a calendar interval that
-means "at
-this exact time tomorrow" no matter the length of the day. A change to or from
-daylight savings time that results in a 23 or 25 hour day is compensated for and the
-specification of "this exact time tomorrow" is maintained. But if you specify 2 or
-more days, each day must be of the same fixed duration (24 hours). In this case, if
-the specified interval includes the change to or from daylight savings time, the
-interval will end an hour sooner or later than you expect.
+Fixed intervals are, by contrast, always multiples of SI units and do not change
+based on calendaring context.

-There are similar differences to consider when you specify single versus multiple
-minutes or hours. Multiple time periods longer than a day are not supported.
-
-Here are the valid time specifications and their meanings:
+[NOTE]
+.Combined `interval` field is deprecated
+==================================
+deprecated[7.2, `interval` field is deprecated] Historically both calendar and fixed
+intervals were configured in a single `interval` field, which led to confusing
+semantics. Specifying `1d` would be assumed as a calendar-aware time,
+whereas `2d` would be interpreted as fixed time. To get "one day" of fixed time,
+the user would need to specify the next smaller unit (in this case, `24h`).
+
+This combined behavior was often unknown to users, and even when knowledgeable about
+the behavior it was difficult to use and confusing.
+
+This behavior has been deprecated in favor of two new, explicit fields: `calendar_interval`
+and `fixed_interval`.
+
+By forcing a choice between calendar and intervals up front, the semantics of the interval
+are clear to the user immediately and there is no ambiguity. The old `interval` field
+will be removed in the future.
+==================================
+
+===== Calendar Intervals
+
+Calendar-aware intervals are configured with the `calendar_interval` parameter.
+Calendar intervals can only be specified in "singular" quantities of the unit
+(`1d`, `1M`, etc). Multiples, such as `2d`, are not supported and will throw an exception.
+
+The accepted units for calendar intervals are:
+
+minute (`m`, `1m`) ::
+All minutes begin at 00 seconds.
+
+One minute is the interval between 00 seconds of the first minute and 00
+seconds of the following minute in the specified timezone, compensating for any
+intervening leap seconds, so that the number of minutes and seconds past the
+hour is the same at the start and end.
+
+hours (`h`, `1h`) ::
+All hours begin at 00 minutes and 00 seconds.
+
+One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00
+minutes of the following hour in the specified timezone, compensating for any
+intervening leap seconds, so that the number of minutes and seconds past the hour
+is the same at the start and end.
+
+days (`d`, `1d`) ::
+All days begin at the earliest possible time, which is usually 00:00:00
+(midnight).
+
+One day (1d) is the interval between the start of the day and the start of
+of the following day in the specified timezone, compensating for any intervening
+time changes.
+
+week (`w`, `1w`) ::
+
+One week is the interval between the start day_of_week:hour:minute:second
+and the same day of the week and time of the following week in the specified
+timezone.
+
+month (`M`, `1M`) ::
+
+One month is the interval between the start day of the month and time of
+day and the same day of the month and time of the following month in the specified
+timezone, so that the day of the month and time of day are the same at the start
+and end.
+
+quarter (`q`, `1q`) ::
+
+One quarter (1q) is the interval between the start day of the month and
+time of day and the same day of the month and time of day three months later,
+so that the day of the month and time of day are the same at the start and end. +
+
+year (`y`, `1y`) ::
+
+One year (1y) is the interval between the start day of the month and time of
+day and the same day of the month and time of day the following year in the
+specified timezone, so that the date and time are the same at the start and end. +
+
+===== Calendar Interval Examples
+As an example, here is an aggregation requesting bucket intervals of a month in calendar time:
+
+[source,js]
+--------------------------------------------------
+POST /sales/_search?size=0
+{
+"aggs" : {
+"sales_over_time" : {
+"date_histogram" : {
+"field" : "date",
+"calendar_interval" : "month"
+}
+}
+}
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sales]
+
+If you attempt to use multiples of calendar units, the aggregation will fail because only
+singular calendar units are supported:
+
+[source,js]
+--------------------------------------------------
+POST /sales/_search?size=0
+{
+"aggs" : {
+"sales_over_time" : {
+"date_histogram" : {
+"field" : "date",
+"calendar_interval" : "2d"
+}
+}
+}
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sales]
+// TEST[catch:bad_request]
+
+[source,js]
+--------------------------------------------------
+{
+"error" : {
+"root_cause" : [...],
+"type" : "x_content_parse_exception",
+"reason" : "[1:82] [date_histogram] failed to parse field [calendar_interval]",
+"caused_by" : {
+"type" : "illegal_argument_exception",
+"reason" : "The supplied interval [2d] could not be parsed as a calendar interval.",
+"stack_trace" : "java.lang.IllegalArgumentException: The supplied interval [2d] could not be parsed as a calendar interval."
+}
+}
+}
+
+--------------------------------------------------
+// NOTCONSOLE
+
+===== Fixed Intervals
+
+Fixed intervals are configured with the `fixed_interval` parameter.
+
+In contrast to calendar-aware intervals, fixed intervals are a fixed number of SI
+units and never deviate, regardless of where they fall on the calendar. One second
+is always composed of 1000ms. This allows fixed intervals to be specified in
+any multiple of the supported units.
+
+However, it means fixed intervals cannot express other units such as months,
+since the duration of a month is not a fixed quantity. Attempting to specify
+a calendar interval like month or quarter will throw an exception.
+
+The accepted units for fixed intervals are:
+
 milliseconds (ms) ::
-Fixed length interval; supports multiples.

 seconds (s) ::
-1000 milliseconds; fixed length interval (except for the last second of a
-minute that contains a leap-second, which is 2000ms long); supports multiples.
+Defined as 1000 milliseconds each

 minutes (m) ::
 All minutes begin at 00 seconds.

-* One minute (1m) is the interval between 00 seconds of the first minute and 00
-seconds of the following minute in the specified timezone, compensating for any
-intervening leap seconds, so that the number of minutes and seconds past the
-hour is the same at the start and end.
-* Multiple minutes (__n__m) are intervals of exactly 60x1000=60,000 milliseconds
-each.
+Defined as 60 seconds each (60,000 milliseconds)

 hours (h) ::
 All hours begin at 00 minutes and 00 seconds.
-* One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00
-minutes of the following hour in the specified timezone, compensating for any
-intervening leap seconds, so that the number of minutes and seconds past the hour
-is the same at the start and end.
-* Multiple hours (__n__h) are intervals of exactly 60x60x1000=3,600,000 milliseconds
-each.
+Defined as 60 minutes each (3,600,000 milliseconds)

 days (d) ::
 All days begin at the earliest possible time, which is usually 00:00:00
 (midnight).

-* One day (1d) is the interval between the start of the day and the start of
-of the following day in the specified timezone, compensating for any intervening
-time changes.
-* Multiple days (__n__d) are intervals of exactly 24x60x60x1000=86,400,000
-milliseconds each.
+Defined as 24 hours (86,400,000 milliseconds)

-weeks (w) ::
-
-* One week (1w) is the interval between the start day_of_week:hour:minute:second
-and the same day of the week and time of the following week in the specified
-timezone.
-* Multiple weeks (__n__w) are not supported.
-
-months (M) ::
-
-* One month (1M) is the interval between the start day of the month and time of
-day and the same day of the month and time of the following month in the specified
-timezone, so that the day of the month and time of day are the same at the start
-and end.
-* Multiple months (__n__M) are not supported.
-
-quarters (q) ::
-
-* One quarter (1q) is the interval between the start day of the month and
-time of day and the same day of the month and time of day three months later,
-so that the day of the month and time of day are the same at the start and end. +
-* Multiple quarters (__n__q) are not supported.
-
-years (y) ::
-
-* One year (1y) is the interval between the start day of the month and time of
-day and the same day of the month and time of day the following year in the
-specified timezone, so that the date and time are the same at the start and end. +
-* Multiple years (__n__y) are not supported.
-
-NOTE:
+===== Fixed Interval Examples
+
+If we try to recreate the "month" `calendar_interval` from earlier, we can approximate that with
+30 fixed days:
+
+[source,js]
+--------------------------------------------------
+POST /sales/_search?size=0
+{
+"aggs" : {
+"sales_over_time" : {
+"date_histogram" : {
+"field" : "date",
+"fixed_interval" : "30d"
+}
+}
+}
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sales]
+
+But if we try to use a calendar unit that is not supported, such as weeks, we'll get an exception:
+
+[source,js]
+--------------------------------------------------
+POST /sales/_search?size=0
+{
+"aggs" : {
+"sales_over_time" : {
+"date_histogram" : {
+"field" : "date",
+"fixed_interval" : "2w"
+}
+}
+}
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sales]
+// TEST[catch:bad_request]
+
+[source,js]
+--------------------------------------------------
+{
+"error" : {
+"root_cause" : [...],
+"type" : "x_content_parse_exception",
+"reason" : "[1:82] [date_histogram] failed to parse field [fixed_interval]",
+"caused_by" : {
+"type" : "illegal_argument_exception",
+"reason" : "failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized",
+"stack_trace" : "java.lang.IllegalArgumentException: failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized"
+}
+}
+}
+
+--------------------------------------------------
+// NOTCONSOLE
+
+===== Notes
+
 In all cases, when the specified end time does not exist, the actual end time is
 the closest available time after the specified end.
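To make the deprecation note in this hunk concrete, here is a rough migration sketch (an editorial illustration based on the documentation above, not a line from the commit): a date histogram that previously used the combined `interval` field picks one of the two new parameters depending on the semantics it needs.

[source,js]
--------------------------------------------------
"date_histogram" : {
    "field" : "date",
    "calendar_interval" : "1d"
}
--------------------------------------------------
// NOTCONSOLE

This requests a calendar-aware day; a strict 24-hour bucket would instead use `"fixed_interval" : "24h"`, while the old combined form `"interval" : "1d"` is the deprecated spelling.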
@@ -123,49 +273,11 @@ WARNING:
 To avoid unexpected results, all connected servers and clients must sync to a
 reliable network time service.

-==== Examples
-
-Requesting bucket intervals of a month.
-
-[source,js]
---------------------------------------------------
-POST /sales/_search?size=0
-{
-"aggs" : {
-"sales_over_time" : {
-"date_histogram" : {
-"field" : "date",
-"interval" : "month"
-}
-}
-}
-}
---------------------------------------------------
-// CONSOLE
-// TEST[setup:sales]
-
-You can also specify time values using abbreviations supported by
+NOTE: fractional time values are not supported, but you can address this by
+shifting to another time unit (e.g., `1.5h` could instead be specified as `90m`).
+
+NOTE: You can also specify time values using abbreviations supported by
 <<time-units,time units>> parsing.
-Note that fractional time values are not supported, but you can address this by
-shifting to another
-time unit (e.g., `1.5h` could instead be specified as `90m`).
-
-[source,js]
---------------------------------------------------
-POST /sales/_search?size=0
-{
-"aggs" : {
-"sales_over_time" : {
-"date_histogram" : {
-"field" : "date",
-"interval" : "90m"
-}
-}
-}
-}
---------------------------------------------------
-// CONSOLE
-// TEST[setup:sales]

 ===== Keys
@@ -186,7 +298,7 @@ POST /sales/_search?size=0
 "sales_over_time" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "1M",
+"calendar_interval" : "1M",
 "format" : "yyyy-MM-dd" <1>
 }
 }

@@ -259,7 +371,7 @@ GET my_index/_search?size=0
 "by_day": {
 "date_histogram": {
 "field": "date",
-"interval": "day"
+"calendar_interval": "day"
 }
 }
 }

@@ -301,7 +413,7 @@ GET my_index/_search?size=0
 "by_day": {
 "date_histogram": {
 "field": "date",
-"interval": "day",
+"calendar_interval": "day",
 "time_zone": "-01:00"
 }
 }

@@ -380,7 +492,7 @@ GET my_index/_search?size=0
 "by_day": {
 "date_histogram": {
 "field": "date",
-"interval": "day",
+"calendar_interval": "day",
 "offset": "+6h"
 }
 }

@@ -432,7 +544,7 @@ POST /sales/_search?size=0
 "sales_over_time" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "1M",
+"calendar_interval" : "1M",
 "format" : "yyyy-MM-dd",
 "keyed": true
 }

@@ -502,7 +614,7 @@ POST /sales/_search?size=0
 "sale_date" : {
 "date_histogram" : {
 "field" : "date",
-"interval": "year",
+"calendar_interval": "year",
 "missing": "2000/01/01" <1>
 }
 }

@@ -522,8 +634,6 @@ control the order using
 the `order` setting. This setting supports the same `order` functionality as
 <<search-aggregations-bucket-terms-aggregation-order,`Terms Aggregation`>>.

-deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys]
-
 ===== Using a script to aggregate by day of the week

 When you need to aggregate the results by day of the week, use a script that
@@ -102,7 +102,7 @@ GET /twitter/_search?typed_keys
 "tweets_over_time": {
 "date_histogram": {
 "field": "date",
-"interval": "year"
+"calendar_interval": "year"
 },
 "aggregations": {
 "top_users": {

@@ -57,7 +57,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"timestamp",
-"interval":"day"
+"calendar_interval":"day"
 },
 "aggs":{
 "the_sum":{

@@ -88,7 +88,7 @@ POST /_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -125,7 +125,7 @@ POST /_search
 "my_date_histo": {
 "date_histogram": {
 "field":"timestamp",
-"interval":"day"
+"calendar_interval":"day"
 },
 "aggs": {
 "the_movavg": {

@@ -153,7 +153,7 @@ POST /sales/_search
 "histo": {
 "date_histogram": {
 "field": "date",
-"interval": "day"
+"calendar_interval": "day"
 },
 "aggs": {
 "categories": {

@@ -42,7 +42,7 @@ POST /_search
 "sales_per_month": {
 "date_histogram": {
 "field": "date",
-"interval": "month"
+"calendar_interval": "month"
 },
 "aggs": {
 "sales": {

@@ -50,7 +50,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "total_sales": {

@@ -53,7 +53,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "total_sales": {

@@ -56,7 +56,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "total_sales": {

@@ -144,7 +144,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "bucket_truncate": {

@@ -40,7 +40,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -43,7 +43,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -137,7 +137,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -237,7 +237,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -44,7 +44,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -42,7 +42,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -42,7 +42,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {
@@ -62,7 +62,7 @@ POST /_search
 "my_date_histo":{ <1>
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -165,7 +165,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -219,7 +219,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -279,7 +279,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -338,7 +338,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -427,7 +427,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -488,7 +488,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -538,7 +538,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -617,7 +617,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -46,7 +46,7 @@ POST /_search
 "my_date_histo":{ <1>
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -148,7 +148,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -207,7 +207,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -250,7 +250,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -293,7 +293,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -338,7 +338,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -390,7 +390,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -436,7 +436,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -488,7 +488,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -546,7 +546,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{

@@ -612,7 +612,7 @@ POST /_search
 "my_date_histo":{
 "date_histogram":{
 "field":"date",
-"interval":"1M"
+"calendar_interval":"1M"
 },
 "aggs":{
 "the_sum":{
@@ -43,7 +43,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -69,7 +69,7 @@ POST /_search
 "my_date_histo": { <1>
 "date_histogram": {
 "field": "timestamp",
-"interval": "day"
+"calendar_interval": "day"
 },
 "aggs": {
 "the_sum": {

@@ -41,7 +41,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {

@@ -41,7 +41,7 @@ POST /sales/_search
 "sales_per_month" : {
 "date_histogram" : {
 "field" : "date",
-"interval" : "month"
+"calendar_interval" : "month"
 },
 "aggs": {
 "sales": {
@@ -59,7 +59,7 @@ ml_autodetect (default distro only)
 ml_datafeed (default distro only)
 ml_utility (default distro only)
 refresh
-rollup_indexing (default distro only)`
+rollup_indexing (default distro only)
 search
 security-token-key (default distro only)
 snapshot
@@ -4,14 +4,15 @@
 The `elasticsearch-node` command enables you to perform certain unsafe
 operations on a node that are only possible while it is shut down. This command
 allows you to adjust the <<modules-node,role>> of a node and may be able to
-recover some data after a disaster.
+recover some data after a disaster or start a node even if it is incompatible
+with the data on disk.

 [float]
 === Synopsis

 [source,shell]
 --------------------------------------------------
-bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster
+bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version
 [--ordinal <Integer>] [-E <KeyValuePair>]
 [-h, --help] ([-s, --silent] | [-v, --verbose])
 --------------------------------------------------

@@ -19,7 +20,7 @@ bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster
 [float]
 === Description

-This tool has three modes:
+This tool has four modes:

 * `elasticsearch-node repurpose` can be used to delete unwanted data from a
 node if it used to be a <<data-node,data node>> or a

@@ -36,6 +37,11 @@ This tool has three modes:
 cluster bootstrapping was not possible, it also enables you to move nodes
 into a brand-new cluster.

+* `elasticsearch-node override-version` enables you to start up a node
+even if the data in the data path was written by an incompatible version of
+{es}. This may sometimes allow you to downgrade to an earlier version of
+{es}.
+
 [[node-tool-repurpose]]
 [float]
 ==== Changing the role of a node

@@ -109,6 +115,25 @@ way forward that does not risk data loss, but it may be possible to use the
 `elasticsearch-node` tool to construct a new cluster that contains some of the
 data from the failed cluster.

+[[node-tool-override-version]]
+[float]
+==== Bypassing version checks
+
+The data that {es} writes to disk is designed to be read by the current version
+and a limited set of future versions. It cannot generally be read by older
+versions, nor by versions that are more than one major version newer. The data
+stored on disk includes the version of the node that wrote it, and {es} checks
+that it is compatible with this version when starting up.
+
+In rare circumstances it may be desirable to bypass this check and start up an
+{es} node using data that was written by an incompatible version. This may not
+work if the format of the stored data has changed, and it is a risky process
+because it is possible for the format to change in ways that {es} may
+misinterpret, silently leading to data loss.
+
+To bypass this check, you can use the `elasticsearch-node override-version`
+tool to overwrite the version number stored in the data path with the current
+version, causing {es} to believe that it is compatible with the on-disk data.
+
 [[node-tool-unsafe-bootstrap]]
 [float]

@@ -262,6 +287,9 @@ one-node cluster.
 `detach-cluster`:: Specifies to unsafely detach this node from its cluster so
 it can join a different cluster.

+`override-version`:: Overwrites the version number stored in the data path so
+that a node can start despite being incompatible with the on-disk data.
+
 `--ordinal <Integer>`:: If there is <<max-local-storage-nodes,more than one
 node sharing a data path>> then this specifies which node to target. Defaults
 to `0`, meaning to use the first node in the data path.

@@ -423,3 +451,32 @@ Do you want to proceed?
 Confirm [y/N] y
 Node was successfully detached from the cluster
 ----

+[float]
+==== Bypassing version checks
+
+Run the `elasticsearch-node override-version` command to overwrite the version
+stored in the data path so that a node can start despite being incompatible
+with the data stored in the data path:
+
+[source, txt]
+----
+node$ ./bin/elasticsearch-node override-version
+
+WARNING: Elasticsearch MUST be stopped before running this tool.
+
+This data path was last written by Elasticsearch version [x.x.x] and may no
+longer be compatible with Elasticsearch version [y.y.y]. This tool will bypass
+this compatibility check, allowing a version [y.y.y] node to start on this data
+path, but a version [y.y.y] node may not be able to read this data or may read
+it incorrectly leading to data loss.
+
+You should not use this tool. Instead, continue to use a version [x.x.x] node
+on this data path. If necessary, you can use reindex-from-remote to copy the
+data from here into an older cluster.
+
+Do you want to proceed?
+
+Confirm [y/N] y
+Successfully overwrote this node's metadata to bypass its version compatibility checks.
+----
@@ -9,8 +9,6 @@ your application to Elasticsearch 7.1.
 See also <<release-highlights>> and <<es-release-notes>>.

-coming[7.1.0]
-
 //NOTE: The notable-breaking-changes tagged regions are re-used in the
 //Installation and Upgrade Guide
@@ -63,7 +63,7 @@ PUT _ml/datafeeds/datafeed-farequote
 "buckets": {
 "date_histogram": {
 "field": "time",
-"interval": "360s",
+"fixed_interval": "360s",
 "time_zone": "UTC"
 },
 "aggregations": {

@@ -119,7 +119,7 @@ pipeline aggregation to find the first order derivative of the counter
 "buckets": {
 "date_histogram": {
 "field": "@timestamp",
-"interval": "5m"
+"fixed_interval": "5m"
 },
 "aggregations": {
 "@timestamp": {
@@ -74,10 +74,18 @@ to be the most efficient by using the internal mechanisms.

 [[vector-functions]]
 ===== Functions for vector fields

+experimental[]
+
 These functions are used for
 for <<dense-vector,`dense_vector`>> and
 <<sparse-vector,`sparse_vector`>> fields.

+NOTE: During vector functions' calculation, all matched documents are
+linearly scanned. Thus, expect the query time grow linearly
+with the number of matched documents. For this reason, we recommend
+to limit the number of matched documents with a `query` parameter.
+
 For dense_vector fields, `cosineSimilarity` calculates the measure of
 cosine similarity between a given query vector and document vectors.
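As a rough usage sketch for the vector functions this hunk documents (index and field names such as `my_index` and `my_dense_vector` are illustrative placeholders, and the exact Painless signature should be checked against the released 7.1 reference), `cosineSimilarity` is typically invoked from a `script_score` query whose inner `query` narrows the set of documents that get scanned:

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "script_score": {
      "query": { "match": { "status": "published" } },
      "script": {
        "source": "cosineSimilarity(params.query_vector, doc['my_dense_vector']) + 1.0",
        "params": { "query_vector": [4.0, 3.4, -0.2] }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE

Adding 1.0 keeps the computed score non-negative, since cosine similarity ranges from -1 to 1 while document scores must not be negative.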
@@ -16,8 +16,8 @@ This section summarizes the changes in each release.

 --

-include::release-notes/7.1.0.asciidoc[]
-include::release-notes/7.0.0.asciidoc[]
+include::release-notes/7.1.asciidoc[]
+include::release-notes/7.0.asciidoc[]
 include::release-notes/7.0.0-rc2.asciidoc[]
 include::release-notes/7.0.0-rc1.asciidoc[]
 include::release-notes/7.0.0-beta1.asciidoc[]
@@ -1,52 +0,0 @@
-////
-// To add a release, copy and paste the following text, uncomment the relevant
-// sections, and add a link to the new section in the list of releases in
-// ../release-notes.asciidoc. Note that release subheads must be floated and
-// sections cannot be empty.
-// TEMPLATE
-
-// [[release-notes-n.n.n]]
-// == {es} version n.n.n
-
-// coming[n.n.n]
-
-// Also see <<breaking-changes-n.n>>.
-
-// [float]
-// [[breaking-n.n.n]]
-// === Breaking Changes
-
-// [float]
-// [[breaking-java-n.n.n]]
-// === Breaking Java Changes
-
-// [float]
-// [[deprecation-n.n.n]]
-// === Deprecations
-
-// [float]
-// [[feature-n.n.n]]
-// === New Features
-
-// [float]
-// [[enhancement-n.n.n]]
-// === Enhancements
-
-// [float]
-// [[bug-n.n.n]]
-// === Bug Fixes
-
-// [float]
-// [[regression-n.n.n]]
-// === Regressions
-
-// [float]
-// === Known Issues
-////
-
-[[release-notes-7.1.0]]
-== {es} version 7.1.0
-
-Also see <<breaking-changes-7.1,Breaking changes in 7.1>>.
-
-coming[7.1.0]
@@ -0,0 +1,45 @@
+[[release-notes-7.1.0]]
+== {es} version 7.1.0
+
+Also see <<breaking-changes-7.1,Breaking changes in 7.1>>.
+
+[[enhancement-7.1.0]]
+[float]
+=== Enhancements
+
+Security::
+* Moved some security features to basic. See <<release-highlights-7.1.0, 7.1.0 Release highlights>>
+
+Authentication::
+* Log warning when unlicensed realms are skipped {pull}41778[#41778]
+
+Infra/Settings::
+* Drop distinction in entries for keystore {pull}41701[#41701]
+
+
+[[bug-7.1.0]]
+[float]
+=== Bug fixes
+
+Cluster Coordination::
+* Handle serialization exceptions during publication {pull}41781[#41781] (issue: {issue}41090[#41090])
+
+Infra/Core::
+* Fix fractional seconds for strict_date_optional_time {pull}41871[#41871] (issue: {issue}41633[#41633])
+
+Network::
+* Enforce transport TLS on Basic with Security {pull}42150[#42150]
+
+Reindex::
+* Allow reindexing into write alias {pull}41677[#41677] (issue: {issue}41667[#41667])
+
+SQL::
+* SQL: Fix issue regarding INTERVAL * number {pull}42014[#42014] (issue: {issue}41239[#41239])
+* SQL: Remove CircuitBreaker from parser {pull}41835[#41835] (issue: {issue}41471[#41471])
+
+Search::
+* Fix IAE on cross_fields query introduced in 7.0.1 {pull}41938[#41938] (issues: {issue}41125[#41125], {issue}41934[#41934])
@@ -4,11 +4,37 @@
 <titleabbrev>7.1.0</titleabbrev>
 ++++

-coming[7.1.0]
+See also <<release-notes-7.1.0,{es} 7.1.0 release notes>>.

-//NOTE: The notable-highlights tagged regions are re-used in the
-//Installation and Upgrade Guide
-
 //tag::notable-highlights[]
+[float]
+==== TLS is now licensed under the Elastic Basic license
+
+Transport Layer Security (TLS), commonly referred to as SSL, is now
+licensed under the free-of-charge Elastic Basic license. Previously, this security feature
+required a paid Gold-tier subscription. With the default distribution,
+you can now encrypt all Elasticsearch communication, within a cluster and across remotes
+clusters. Download https://www.elastic.co/downloads/elasticsearch[Elasticsearch],
+https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-tls.html[configure TLS],
+and run your cluster in production, knowing all Elasticsearch communication is safely encrypted.
+For details, see https://www.elastic.co/subscriptions
+//end::notable-highlights[]
+
+//tag::notable-highlights[]
+[float]
+==== RBAC is now licensed under the Elastic Basic license
+
+RBAC (Role Based Access Control) is now licenced under the free-of-charge Elastic Basic licence.
+Previously, this security feature required a paid Gold-tier subscription.
+With the default distribution you can take advantage of RBAC by configuring users, groups, roles
+and permissions for any user from the
+https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-file-realm.html[file realm]
+or the https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-native-realm.html[native realm]
+. Download https://www.elastic.co/downloads/elasticsearch[Elasticsearch],
+https://www.elastic.co/guide/en/elastic-stack-overview/7.1/authorization.html[configure RBAC],
+and run your cluster in production, knowing your private data stays private.
+Note that our advanced security features, such as single sign-on and Active Directory/LDAP
+authentication to field-level and document-level security, remain paid features.
+For details, see https://www.elastic.co/subscriptions
+
 //end::notable-highlights[]
@@ -63,7 +63,7 @@ Which will yield the following response:
 "cron" : "*/30 * * * * ?",
 "groups" : {
 "date_histogram" : {
-"interval" : "1h",
+"fixed_interval" : "1h",
 "delay": "7d",
 "field": "timestamp",
 "time_zone": "UTC"

@@ -149,7 +149,7 @@ PUT _rollup/job/sensor2 <1>
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {

@@ -189,7 +189,7 @@ Which will yield the following response:
 "cron" : "*/30 * * * * ?",
 "groups" : {
 "date_histogram" : {
-"interval" : "1h",
+"fixed_interval" : "1h",
 "delay": "7d",
 "field": "timestamp",
 "time_zone": "UTC"

@@ -244,7 +244,7 @@ Which will yield the following response:
 "cron" : "*/30 * * * * ?",
 "groups" : {
 "date_histogram" : {
-"interval" : "1h",
+"fixed_interval" : "1h",
 "delay": "7d",
 "field": "timestamp",
 "time_zone": "UTC"

@@ -68,7 +68,7 @@ PUT _rollup/job/sensor
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {

@@ -62,7 +62,7 @@ PUT _rollup/job/sensor
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {

@@ -125,7 +125,7 @@ Which will yield the following response:
 {
 "agg" : "date_histogram",
 "time_zone" : "UTC",
-"interval" : "1h",
+"fixed_interval" : "1h",
 "delay": "7d"
 }
 ],

@@ -53,7 +53,7 @@ PUT _rollup/job/sensor
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {

@@ -118,7 +118,7 @@ This will yield the following response:
 {
 "agg" : "date_histogram",
 "time_zone" : "UTC",
-"interval" : "1h",
+"fixed_interval" : "1h",
 "delay": "7d"
 }
 ],

@@ -24,7 +24,7 @@ PUT _rollup/job/sensor
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "60m",
+"fixed_interval": "60m",
 "delay": "7d"
 },
 "terms": {

@@ -100,7 +100,7 @@ fields will then be available later for aggregating into buckets. For example, th
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "60m",
+"fixed_interval": "60m",
 "delay": "7d"
 },
 "terms": {

@@ -62,7 +62,7 @@ PUT _rollup/job/sensor
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {

@@ -39,7 +39,7 @@ PUT _rollup/job/sensor
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "60m"
+"fixed_interval": "60m"
 },
 "terms": {
 "fields": ["node"]

@@ -194,7 +194,7 @@ GET /sensor_rollup/_rollup_search
 "timeline": {
 "date_histogram": {
 "field": "timestamp",
-"interval": "7d"
+"fixed_interval": "7d"
 },
 "aggs": {
 "nodes": {

@@ -22,7 +22,7 @@ based on which groups are potentially useful to future queries. For example, th
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {

@@ -47,7 +47,7 @@ Importantly, these aggs/fields can be used in any combination. This aggregation
 "hourly": {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h"
+"fixed_interval": "1h"
 },
 "aggs": {
 "host_names": {

@@ -69,7 +69,7 @@ is just as valid as this aggregation:
 "hourly": {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h"
+"fixed_interval": "1h"
 },
 "aggs": {
 "data_center": {

@@ -171,7 +171,7 @@ PUT _rollup/job/combined
 "groups" : {
 "date_histogram": {
 "field": "timestamp",
-"interval": "1h",
+"fixed_interval": "1h",
 "delay": "7d"
 },
 "terms": {
@@ -2,10 +2,8 @@
 [[configuring-tls-docker]]
 === Encrypting communications in an {es} Docker Container

-Starting with version 6.0.0, {stack} {security-features}
-(Gold, Platinum or Enterprise subscriptions)
-https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[require SSL/TLS]
-encryption for the transport networking layer.
+Unless you are using a trial license, {stack} {security-features} require
+SSL/TLS encryption for the transport networking layer.

 This section demonstrates an easy path to get started with SSL/TLS for both
 HTTPS and transport using the {es} Docker image. The example uses

@@ -7,8 +7,8 @@ your {es} cluster. Connections are secured using Transport Layer Security
 (TLS/SSL).

 WARNING: Clusters that do not have encryption enabled send all data in plain text
-including passwords and will not be able to install a license that enables
-{security-features}.
+including passwords. If the {es} {security-features} are enabled, unless you
+have a trial license, you must configure SSL/TLS for internode-communication.

 To enable encryption, you need to perform the following steps on each node in
 the cluster:
@@ -1,16 +1,15 @@
 [[ssl-tls]]
-=== Setting Up TLS on a cluster
+=== Setting up TLS on a cluster

-The {stack} {security-features} enables you to encrypt traffic to, from, and
+The {stack} {security-features} enable you to encrypt traffic to, from, and
 within your {es} cluster. Connections are secured using Transport Layer Security
 (TLS), which is commonly referred to as "SSL".

 WARNING: Clusters that do not have encryption enabled send all data in plain text
-including passwords and will not be able to install a license that enables
-{security-features}.
+including passwords. If the {es} {security-features} are enabled, unless you have a trial license, you must configure SSL/TLS for internode-communication.

 The following steps describe how to enable encryption across the various
-components of the Elastic Stack. You must perform each of the steps that are
+components of the {stack}. You must perform each of the steps that are
 applicable to your cluster.

 . Generate a private key and X.509 certificate for each of your {es} nodes. See

@@ -22,14 +21,14 @@ enable TLS on the HTTP layer. See
 {ref}/configuring-tls.html#tls-transport[Encrypting Communications Between Nodes in a Cluster] and
 {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications].

-. Configure {monitoring} to use encrypted connections. See <<secure-monitoring>>.
+. Configure the {monitor-features} to use encrypted connections. See <<secure-monitoring>>.

 . Configure {kib} to encrypt communications between the browser and
 the {kib} server and to connect to {es} via HTTPS. See
-{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}].
+{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}].

 . Configure Logstash to use TLS encryption. See
-{logstash-ref}/ls-security.html[Configuring Security in Logstash].
+{logstash-ref}/ls-security.html[Configuring security in {ls}].

 . Configure Beats to use encrypted connections. See <<beats>>.
@ -1526,13 +1526,30 @@ Controls the verification of certificates. Valid values are:
|
||||||
The default value is `full`.
|
The default value is `full`.
|
||||||
|
|
||||||
`*.ssl.cipher_suites`::
|
`*.ssl.cipher_suites`::
|
||||||
Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[
|
Supported cipher suites can be found in Oracle's
|
||||||
Java Cryptography Architecture documentation]. Defaults to `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`,
|
https://docs.oracle.com/en/java/javase/11/security/oracle-providers.html#GUID-7093246A-31A3-4304-AC5F-5FB6400405E2[Java
|
||||||
`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`,
|
Cryptography Architecture documentation].
|
||||||
`TLS_RSA_WITH_AES_128_CBC_SHA256`, `TLS_RSA_WITH_AES_128_CBC_SHA`. If the _Java Cryptography Extension (JCE) Unlimited Strength
|
Defaults to `TLS_AES_256_GCM_SHA384`, `TLS_AES_128_GCM_SHA256`,
|
||||||
Jurisdiction Policy Files_ has been installed, the default value also includes `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384`,
|
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`,
|
||||||
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384`, `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`,
|
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`,
|
||||||
`TLS_RSA_WITH_AES_256_CBC_SHA256`, `TLS_RSA_WITH_AES_256_CBC_SHA`.
|
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`,
|
||||||
|
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`,
|
||||||
|
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`,
|
||||||
|
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`,
|
||||||
|
`TLS_RSA_WITH_AES_256_GCM_SHA384`, `TLS_RSA_WITH_AES_128_GCM_SHA256`,
|
||||||
|
`TLS_RSA_WITH_AES_256_CBC_SHA256`, `TLS_RSA_WITH_AES_128_CBC_SHA256`,
|
||||||
|
`TLS_RSA_WITH_AES_256_CBC_SHA`, `TLS_RSA_WITH_AES_128_CBC_SHA`.
|
||||||
|
+
|
||||||
|
--
|
||||||
|
NOTE: The default cipher suites list above includes TLSv1.3 ciphers and ciphers
|
||||||
|
that require the _Java Cryptography Extension (JCE) Unlimited Strength
|
||||||
|
Jurisdiction Policy Files_ for 256-bit AES encryption. If TLSv1.3 is not
|
||||||
|
available, the TLSv1.3 ciphers `TLS_AES_256_GCM_SHA384`, `TLS_AES_128_GCM_SHA256`
|
||||||
|
will not be included in the default list. If 256-bit AES is unavailable, ciphers
|
||||||
|
with `AES_256` in their names will not be included in the default list. Finally,
|
||||||
|
AES GCM has known performance issues in Java versions prior to 11 and will only
|
||||||
|
be included in the default list when using Java 11 or above.
|
||||||
|
--
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
[[tls-ssl-key-settings]]
|
[[tls-ssl-key-settings]]
|
||||||
|
|
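As a point of reference for the note above, here is a minimal, hypothetical Java sketch (not part of this change; the class name `DefaultCipherProbe` is invented) showing how the two runtime conditions that shape the default list, 256-bit AES availability and TLSv1.3 support, can be probed with standard JDK APIs:

```java
import javax.crypto.Cipher;
import javax.net.ssl.SSLContext;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

public class DefaultCipherProbe {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        // 256-bit AES ciphers are only usable when the unlimited-strength policy is in effect.
        boolean has256BitAes = Cipher.getMaxAllowedKeyLength("AES") >= 256;

        // The TLS_AES_* suites only apply when the JVM actually offers TLSv1.3.
        boolean hasTlsV13 = Arrays.asList(
                SSLContext.getDefault().getSupportedSSLParameters().getProtocols())
            .contains("TLSv1.3");

        System.out.println("256-bit AES available: " + has256BitAes);
        System.out.println("TLSv1.3 available:     " + hasTlsV13);
    }
}
```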
|
@ -53,9 +53,8 @@ must also be valid.
|
||||||
=== SSL/TLS check
|
=== SSL/TLS check
|
||||||
//See TLSLicenseBootstrapCheck.java
|
//See TLSLicenseBootstrapCheck.java
|
||||||
|
|
||||||
In 6.0 and later releases, if you have a gold, platinum, or enterprise license
|
If you enable {es} {security-features}, unless you have a trial license, you
|
||||||
and {es} {security-features} are enabled, you must configure SSL/TLS for
|
must configure SSL/TLS for internode-communication.
|
||||||
internode-communication.
|
|
||||||
|
|
||||||
NOTE: Single-node clusters that use a loopback interface do not have this
|
NOTE: Single-node clusters that use a loopback interface do not have this
|
||||||
requirement. For more information, see
|
requirement. For more information, see
|
||||||
|
|
|
@ -7,8 +7,8 @@
|
||||||
process so upgrading does not interrupt service. Rolling upgrades are supported:
|
process so upgrading does not interrupt service. Rolling upgrades are supported:
|
||||||
|
|
||||||
* Between minor versions
|
* Between minor versions
|
||||||
* From 5.6 to 6.7
|
* From 5.6 to 6.8
|
||||||
* From 6.7 to {version}
|
* From 6.8 to {version}
|
||||||
|
|
||||||
{es} can read indices created in the previous major version. If you
|
{es} can read indices created in the previous major version. If you
|
||||||
have indices created in 5.x or before, you must reindex or delete them
|
have indices created in 5.x or before, you must reindex or delete them
|
||||||
|
@ -21,7 +21,7 @@ When upgrading to a new version of {es}, you need to upgrade each
|
||||||
of the products in your Elastic Stack. For more information, see the
|
of the products in your Elastic Stack. For more information, see the
|
||||||
{stack-ref}/upgrading-elastic-stack.html[Elastic Stack Installation and Upgrade Guide].
|
{stack-ref}/upgrading-elastic-stack.html[Elastic Stack Installation and Upgrade Guide].
|
||||||
|
|
||||||
To upgrade directly to {version} from 6.6 or earlier, you must shut down the
|
To upgrade directly to {version} from 6.7 or earlier, you must shut down the
|
||||||
cluster, install {version}, and restart. For more information, see
|
cluster, install {version}, and restart. For more information, see
|
||||||
<<restart-upgrade, Full cluster restart upgrade>>.
|
<<restart-upgrade, Full cluster restart upgrade>>.
|
||||||
|
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
[[restart-upgrade]]
|
[[restart-upgrade]]
|
||||||
== Full cluster restart upgrade
|
== Full cluster restart upgrade
|
||||||
|
|
||||||
To upgrade directly to {es} {version} from versions 6.0-6.6, you must shut down
|
To upgrade directly to {es} {version} from versions 6.0-6.7, you must shut down
|
||||||
all nodes in the cluster, upgrade each node to {version}, and restart the cluster.
|
all nodes in the cluster, upgrade each node to {version}, and restart the cluster.
|
||||||
|
|
||||||
NOTE: If you are running a version prior to 6.0,
|
NOTE: If you are running a version prior to 6.0,
|
||||||
https://www.elastic.co/guide/en/elastic-stack/6.7/upgrading-elastic-stack.html[upgrade to 6.7]
|
https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[upgrade to 6.8]
|
||||||
and reindex your old indices or bring up a new {version} cluster and
|
and reindex your old indices or bring up a new {version} cluster and
|
||||||
<<reindex-upgrade-remote, reindex from remote>>.
|
<<reindex-upgrade-remote, reindex from remote>>.
|
||||||
|
|
||||||
|
|
|
@ -10,13 +10,13 @@ running the older version.
|
||||||
Rolling upgrades are supported:
|
Rolling upgrades are supported:
|
||||||
|
|
||||||
* Between minor versions
|
* Between minor versions
|
||||||
* https://www.elastic.co/guide/en/elastic-stack/6.7/upgrading-elastic-stack.html[From 5.6 to 6.7]
|
* https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[From 5.6 to 6.8]
|
||||||
* From 6.7 to {version}
|
* From 6.8 to {version}
|
||||||
|
|
||||||
Upgrading directly to {version} from 6.6 or earlier requires a
|
Upgrading directly to {version} from 6.7 or earlier requires a
|
||||||
<<restart-upgrade, full cluster restart>>.
|
<<restart-upgrade, full cluster restart>>.
|
||||||
|
|
||||||
To perform a rolling upgrade from 6.7 to {version}:
|
To perform a rolling upgrade from 6.8 to {version}:
|
||||||
|
|
||||||
. *Disable shard allocation*.
|
. *Disable shard allocation*.
|
||||||
+
|
+
|
||||||
|
|
|
@ -19,6 +19,8 @@
|
||||||
|
|
||||||
package org.elasticsearch.common.ssl;
|
package org.elasticsearch.common.ssl;
|
||||||
|
|
||||||
|
import org.elasticsearch.bootstrap.JavaVersion;
|
||||||
|
|
||||||
import javax.crypto.Cipher;
|
import javax.crypto.Cipher;
|
||||||
import javax.net.ssl.KeyManagerFactory;
|
import javax.net.ssl.KeyManagerFactory;
|
||||||
import javax.net.ssl.TrustManagerFactory;
|
import javax.net.ssl.TrustManagerFactory;
|
||||||
|
@ -338,30 +340,53 @@ public abstract class SslConfigurationLoader {
|
||||||
}
|
}
|
||||||
|
|
||||||
private static List<String> loadDefaultCiphers() {
|
private static List<String> loadDefaultCiphers() {
|
||||||
final List<String> ciphers128 = Arrays.asList(
|
final boolean has256BitAES = has256BitAES();
|
||||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
final boolean useGCM = JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0;
|
||||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
final boolean tlsV13Supported = DEFAULT_PROTOCOLS.contains("TLSv1.3");
|
||||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
List<String> ciphers = new ArrayList<>();
|
||||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
if (tlsV13Supported) { // TLSv1.3 cipher has PFS, AEAD, hardware support
|
||||||
"TLS_RSA_WITH_AES_128_CBC_SHA256",
|
if (has256BitAES) {
|
||||||
"TLS_RSA_WITH_AES_128_CBC_SHA"
|
ciphers.add("TLS_AES_256_GCM_SHA384");
|
||||||
);
|
|
||||||
final List<String> ciphers256 = Arrays.asList(
|
|
||||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384",
|
|
||||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
|
|
||||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
|
||||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
|
||||||
"TLS_RSA_WITH_AES_256_CBC_SHA256",
|
|
||||||
"TLS_RSA_WITH_AES_256_CBC_SHA"
|
|
||||||
);
|
|
||||||
if (has256BitAES()) {
|
|
||||||
List<String> ciphers = new ArrayList<>(ciphers256.size() + ciphers128.size());
|
|
||||||
ciphers.addAll(ciphers256);
|
|
||||||
ciphers.addAll(ciphers128);
|
|
||||||
return ciphers;
|
|
||||||
} else {
|
|
||||||
return ciphers128;
|
|
||||||
}
|
}
|
||||||
|
ciphers.add("TLS_AES_128_GCM_SHA256");
|
||||||
|
}
|
||||||
|
if (useGCM) { // PFS, AEAD, hardware support
|
||||||
|
if (has256BitAES) {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||||
|
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"));
|
||||||
|
} else {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PFS, hardware support
|
||||||
|
if (has256BitAES) {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||||
|
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"));
|
||||||
|
} else {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// AEAD, hardware support
|
||||||
|
if (useGCM) {
|
||||||
|
if (has256BitAES) {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"));
|
||||||
|
} else {
|
||||||
|
ciphers.add("TLS_RSA_WITH_AES_128_GCM_SHA256");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hardware support
|
||||||
|
if (has256BitAES) {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
"TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"));
|
||||||
|
} else {
|
||||||
|
ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA"));
|
||||||
|
}
|
||||||
|
return ciphers;
|
||||||
}
|
}
|
||||||
|
|
||||||
private static boolean has256BitAES() {
|
private static boolean has256BitAES() {
|
||||||
|
|
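The comments in the new `loadDefaultCiphers()` explain the preference order (TLSv1.3 suites first, then PFS plus AEAD, then PFS, then AEAD, then the remaining hardware-supported CBC suites). As a hedged illustration only, a caller could intersect such a preference list with what the running JVM reports as supported; `SupportedCipherFilter` below is an invented helper, not part of `SslConfigurationLoader`:

```java
import javax.net.ssl.SSLContext;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

final class SupportedCipherFilter {
    // Keeps only the suites the current JVM supports, preserving the preference order.
    static List<String> retainSupported(List<String> preferredOrder) throws NoSuchAlgorithmException {
        Set<String> supported = Arrays.stream(
                SSLContext.getDefault().getSupportedSSLParameters().getCipherSuites())
            .collect(Collectors.toSet());
        return preferredOrder.stream().filter(supported::contains).collect(Collectors.toList());
    }
}
```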
|
@ -66,7 +66,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -98,7 +98,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -130,7 +130,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -162,7 +162,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -189,7 +189,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -216,7 +216,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -243,7 +243,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -270,7 +270,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
@ -296,7 +296,7 @@ setup:
|
||||||
the_histo:
|
the_histo:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: "date"
|
field: "date"
|
||||||
interval: "1d"
|
calendar_interval: "1d"
|
||||||
aggs:
|
aggs:
|
||||||
the_avg:
|
the_avg:
|
||||||
avg:
|
avg:
|
||||||
|
|
|
@ -120,6 +120,7 @@ public class ReindexRestClientSslTests extends ESTestCase {
|
||||||
final List<Thread> threads = new ArrayList<>();
|
final List<Thread> threads = new ArrayList<>();
|
||||||
final Settings settings = Settings.builder()
|
final Settings settings = Settings.builder()
|
||||||
.put("path.home", createTempDir())
|
.put("path.home", createTempDir())
|
||||||
|
.put("reindex.ssl.supported_protocols", "TLSv1.2")
|
||||||
.build();
|
.build();
|
||||||
final Environment environment = TestEnvironment.newEnvironment(settings);
|
final Environment environment = TestEnvironment.newEnvironment(settings);
|
||||||
final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
|
final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
|
||||||
|
@ -134,6 +135,7 @@ public class ReindexRestClientSslTests extends ESTestCase {
|
||||||
final Settings settings = Settings.builder()
|
final Settings settings = Settings.builder()
|
||||||
.put("path.home", createTempDir())
|
.put("path.home", createTempDir())
|
||||||
.putList("reindex.ssl.certificate_authorities", ca.toString())
|
.putList("reindex.ssl.certificate_authorities", ca.toString())
|
||||||
|
.put("reindex.ssl.supported_protocols", "TLSv1.2")
|
||||||
.build();
|
.build();
|
||||||
final Environment environment = TestEnvironment.newEnvironment(settings);
|
final Environment environment = TestEnvironment.newEnvironment(settings);
|
||||||
final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
|
final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
|
||||||
|
@ -149,6 +151,7 @@ public class ReindexRestClientSslTests extends ESTestCase {
|
||||||
final Settings settings = Settings.builder()
|
final Settings settings = Settings.builder()
|
||||||
.put("path.home", createTempDir())
|
.put("path.home", createTempDir())
|
||||||
.put("reindex.ssl.verification_mode", "NONE")
|
.put("reindex.ssl.verification_mode", "NONE")
|
||||||
|
.put("reindex.ssl.supported_protocols", "TLSv1.2")
|
||||||
.build();
|
.build();
|
||||||
final Environment environment = TestEnvironment.newEnvironment(settings);
|
final Environment environment = TestEnvironment.newEnvironment(settings);
|
||||||
final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
|
final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
|
||||||
|
@ -169,6 +172,7 @@ public class ReindexRestClientSslTests extends ESTestCase {
|
||||||
.put("reindex.ssl.certificate", cert)
|
.put("reindex.ssl.certificate", cert)
|
||||||
.put("reindex.ssl.key", key)
|
.put("reindex.ssl.key", key)
|
||||||
.put("reindex.ssl.key_passphrase", "client-password")
|
.put("reindex.ssl.key_passphrase", "client-password")
|
||||||
|
.put("reindex.ssl.supported_protocols", "TLSv1.2")
|
||||||
.build();
|
.build();
|
||||||
AtomicReference<Certificate[]> clientCertificates = new AtomicReference<>();
|
AtomicReference<Certificate[]> clientCertificates = new AtomicReference<>();
|
||||||
handler = https -> {
|
handler = https -> {
|
||||||
|
|
|
@ -25,6 +25,7 @@ import com.sun.net.httpserver.Headers;
|
||||||
import com.sun.net.httpserver.HttpsConfigurator;
|
import com.sun.net.httpserver.HttpsConfigurator;
|
||||||
import com.sun.net.httpserver.HttpsServer;
|
import com.sun.net.httpserver.HttpsServer;
|
||||||
import org.apache.logging.log4j.LogManager;
|
import org.apache.logging.log4j.LogManager;
|
||||||
|
import org.elasticsearch.bootstrap.JavaVersion;
|
||||||
import org.elasticsearch.cloud.azure.classic.management.AzureComputeService;
|
import org.elasticsearch.cloud.azure.classic.management.AzureComputeService;
|
||||||
import org.elasticsearch.common.SuppressForbidden;
|
import org.elasticsearch.common.SuppressForbidden;
|
||||||
import org.elasticsearch.common.io.FileSystemUtils;
|
import org.elasticsearch.common.io.FileSystemUtils;
|
||||||
|
@ -59,7 +60,9 @@ import java.net.InetSocketAddress;
|
||||||
import java.nio.charset.StandardCharsets;
|
import java.nio.charset.StandardCharsets;
|
||||||
import java.nio.file.Files;
|
import java.nio.file.Files;
|
||||||
import java.nio.file.Path;
|
import java.nio.file.Path;
|
||||||
|
import java.security.AccessController;
|
||||||
import java.security.KeyStore;
|
import java.security.KeyStore;
|
||||||
|
import java.security.PrivilegedAction;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
@ -262,11 +265,30 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase {
|
||||||
kmf.init(ks, passphrase);
|
kmf.init(ks, passphrase);
|
||||||
TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
|
TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
|
||||||
tmf.init(ks);
|
tmf.init(ks);
|
||||||
SSLContext ssl = SSLContext.getInstance("TLS");
|
SSLContext ssl = SSLContext.getInstance(getProtocol());
|
||||||
ssl.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
|
ssl.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
|
||||||
return ssl;
|
return ssl;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to
|
||||||
|
* 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK
|
||||||
|
*/
|
||||||
|
private static String getProtocol() {
|
||||||
|
if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) {
|
||||||
|
return "TLS";
|
||||||
|
} else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) {
|
||||||
|
return "TLSv1.2";
|
||||||
|
} else {
|
||||||
|
JavaVersion full =
|
||||||
|
AccessController.doPrivileged((PrivilegedAction<JavaVersion>) () -> JavaVersion.parse(System.getProperty("java.version")));
|
||||||
|
if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) {
|
||||||
|
return "TLSv1.2";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "TLS";
|
||||||
|
}
|
||||||
|
|
||||||
@AfterClass
|
@AfterClass
|
||||||
public static void stopHttpd() throws IOException {
|
public static void stopHttpd() throws IOException {
|
||||||
for (int i = 0; i < internalCluster().size(); i++) {
|
for (int i = 0; i < internalCluster().size(); i++) {
|
||||||
|
|
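The `getProtocol()` workaround above pins the test's `HttpsServer` to TLSv1.2 on JDKs older than 12.0.1. A client talking to such a server can pin the handshake the same way; the sketch below is illustrative only (the URL and class name are placeholders, not part of this change):

```java
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import java.net.URL;

public class PinnedTlsClient {
    public static void main(String[] args) throws Exception {
        // Restrict the context to TLSv1.2 so the handshake never negotiates TLSv1.3.
        SSLContext ctx = SSLContext.getInstance("TLSv1.2");
        ctx.init(null, null, null); // default key and trust managers

        HttpsURLConnection conn =
            (HttpsURLConnection) new URL("https://localhost:8443/").openConnection();
        conn.setSSLSocketFactory(ctx.getSocketFactory());
        System.out.println("HTTP status: " + conn.getResponseCode());
    }
}
```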
|
@ -564,7 +564,7 @@ public class CCSDuelIT extends ESRestTestCase {
|
||||||
tags.showTermDocCountError(true);
|
tags.showTermDocCountError(true);
|
||||||
DateHistogramAggregationBuilder creation = new DateHistogramAggregationBuilder("creation");
|
DateHistogramAggregationBuilder creation = new DateHistogramAggregationBuilder("creation");
|
||||||
creation.field("creationDate");
|
creation.field("creationDate");
|
||||||
creation.dateHistogramInterval(DateHistogramInterval.QUARTER);
|
creation.calendarInterval(DateHistogramInterval.QUARTER);
|
||||||
creation.subAggregation(tags);
|
creation.subAggregation(tags);
|
||||||
sourceBuilder.aggregation(creation);
|
sourceBuilder.aggregation(creation);
|
||||||
duelSearch(searchRequest, CCSDuelIT::assertAggs);
|
duelSearch(searchRequest, CCSDuelIT::assertAggs);
|
||||||
|
@ -591,7 +591,7 @@ public class CCSDuelIT extends ESRestTestCase {
|
||||||
sourceBuilder.size(0);
|
sourceBuilder.size(0);
|
||||||
DateHistogramAggregationBuilder daily = new DateHistogramAggregationBuilder("daily");
|
DateHistogramAggregationBuilder daily = new DateHistogramAggregationBuilder("daily");
|
||||||
daily.field("creationDate");
|
daily.field("creationDate");
|
||||||
daily.dateHistogramInterval(DateHistogramInterval.DAY);
|
daily.calendarInterval(DateHistogramInterval.DAY);
|
||||||
sourceBuilder.aggregation(daily);
|
sourceBuilder.aggregation(daily);
|
||||||
daily.subAggregation(new DerivativePipelineAggregationBuilder("derivative", "_count"));
|
daily.subAggregation(new DerivativePipelineAggregationBuilder("derivative", "_count"));
|
||||||
sourceBuilder.aggregation(new MaxBucketPipelineAggregationBuilder("biggest_day", "daily._count"));
|
sourceBuilder.aggregation(new MaxBucketPipelineAggregationBuilder("biggest_day", "daily._count"));
|
||||||
|
|
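The two hunks above swap the deprecated `dateHistogramInterval(...)` for `calendarInterval(...)`. A minimal sketch of the new builder call, assuming the same 7.2-era aggregation builders (the class and field names below are illustrative):

```java
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class CalendarIntervalExample {
    public static SearchSourceBuilder dailyBuckets() {
        DateHistogramAggregationBuilder daily = new DateHistogramAggregationBuilder("daily");
        daily.field("creationDate");                        // calendar-aware daily buckets
        daily.calendarInterval(DateHistogramInterval.DAY);  // replaces dateHistogramInterval(...)
        return new SearchSourceBuilder().size(0).aggregation(daily);
    }
}
```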
|
@ -61,14 +61,6 @@
|
||||||
"type" : "list",
|
"type" : "list",
|
||||||
"description" : "A list of fields to extract and return from the _source field"
|
"description" : "A list of fields to extract and return from the _source field"
|
||||||
},
|
},
|
||||||
"_source_exclude": {
|
|
||||||
"type" : "list",
|
|
||||||
"description" : "A list of fields to exclude from the returned _source field"
|
|
||||||
},
|
|
||||||
"_source_include": {
|
|
||||||
"type" : "list",
|
|
||||||
"description" : "A list of fields to extract and return from the _source field"
|
|
||||||
},
|
|
||||||
"version" : {
|
"version" : {
|
||||||
"type" : "number",
|
"type" : "number",
|
||||||
"description" : "Explicit version number for concurrency control"
|
"description" : "Explicit version number for concurrency control"
|
||||||
|
|
|
@ -143,7 +143,8 @@ setup:
|
||||||
"Deprecated _time order":
|
"Deprecated _time order":
|
||||||
|
|
||||||
- skip:
|
- skip:
|
||||||
reason: _time order deprecated in 6.0, replaced by _key
|
version: " - 7.1.99"
|
||||||
|
reason: _time order deprecated in 6.0, replaced by _key. Calendar_interval added in 7.2
|
||||||
features: "warnings"
|
features: "warnings"
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
|
@ -176,7 +177,7 @@ setup:
|
||||||
- do:
|
- do:
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
body: { "aggs" : { "histo" : { "date_histogram" : { "field" : "date", "interval" : "month", "order" : { "_time" : "desc" } } } } }
|
body: { "aggs" : { "histo" : { "date_histogram" : { "field" : "date", "calendar_interval" : "month", "order" : { "_time" : "desc" } } } } }
|
||||||
warnings:
|
warnings:
|
||||||
- "Deprecated aggregation order key [_time] used, replaced by [_key]"
|
- "Deprecated aggregation order key [_time] used, replaced by [_key]"
|
||||||
|
|
||||||
|
|
|
@ -251,8 +251,20 @@ setup:
|
||||||
|
|
||||||
---
|
---
|
||||||
"Bad params":
|
"Bad params":
|
||||||
|
- skip:
|
||||||
|
version: " - 7.1.99"
|
||||||
|
reason: "empty bodies throws exception starting in 7.2"
|
||||||
|
- do:
|
||||||
|
catch: /\[filters\] cannot be empty/
|
||||||
|
search:
|
||||||
|
rest_total_hits_as_int: true
|
||||||
|
body:
|
||||||
|
aggs:
|
||||||
|
the_filter:
|
||||||
|
filters: {}
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
|
catch: /\[filters\] cannot be empty/
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
body:
|
body:
|
||||||
|
|
|
@ -264,8 +264,74 @@ setup:
|
||||||
---
|
---
|
||||||
"Composite aggregation with format":
|
"Composite aggregation with format":
|
||||||
- skip:
|
- skip:
|
||||||
version: " - 6.2.99"
|
version: " - 7.1.99"
|
||||||
reason: this uses a new option (format) added in 6.3.0
|
reason: calendar_interval introduced in 7.2.0
|
||||||
|
features: warnings
|
||||||
|
|
||||||
|
- do:
|
||||||
|
warnings:
|
||||||
|
- '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.'
|
||||||
|
search:
|
||||||
|
rest_total_hits_as_int: true
|
||||||
|
index: test
|
||||||
|
body:
|
||||||
|
aggregations:
|
||||||
|
test:
|
||||||
|
composite:
|
||||||
|
sources: [
|
||||||
|
{
|
||||||
|
"date": {
|
||||||
|
"date_histogram": {
|
||||||
|
"field": "date",
|
||||||
|
"interval": "1d",
|
||||||
|
"format": "yyyy-MM-dd"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
- match: {hits.total: 6}
|
||||||
|
- length: { aggregations.test.buckets: 2 }
|
||||||
|
- match: { aggregations.test.buckets.0.key.date: "2017-10-20" }
|
||||||
|
- match: { aggregations.test.buckets.0.doc_count: 1 }
|
||||||
|
- match: { aggregations.test.buckets.1.key.date: "2017-10-21" }
|
||||||
|
- match: { aggregations.test.buckets.1.doc_count: 1 }
|
||||||
|
|
||||||
|
- do:
|
||||||
|
warnings:
|
||||||
|
- '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.'
|
||||||
|
search:
|
||||||
|
rest_total_hits_as_int: true
|
||||||
|
index: test
|
||||||
|
body:
|
||||||
|
aggregations:
|
||||||
|
test:
|
||||||
|
composite:
|
||||||
|
after: {
|
||||||
|
date: "2017-10-20"
|
||||||
|
}
|
||||||
|
sources: [
|
||||||
|
{
|
||||||
|
"date": {
|
||||||
|
"date_histogram": {
|
||||||
|
"field": "date",
|
||||||
|
"interval": "1d",
|
||||||
|
"format": "yyyy-MM-dd"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
- match: {hits.total: 6}
|
||||||
|
- length: { aggregations.test.buckets: 1 }
|
||||||
|
- match: { aggregations.test.buckets.0.key.date: "2017-10-21" }
|
||||||
|
- match: { aggregations.test.buckets.0.doc_count: 1 }
|
||||||
|
|
||||||
|
---
|
||||||
|
"Composite aggregation with format and calendar_interval":
|
||||||
|
- skip:
|
||||||
|
version: " - 7.1.99"
|
||||||
|
reason: calendar_interval introduced in 7.2.0
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
search:
|
search:
|
||||||
|
@ -280,7 +346,7 @@ setup:
|
||||||
"date": {
|
"date": {
|
||||||
"date_histogram": {
|
"date_histogram": {
|
||||||
"field": "date",
|
"field": "date",
|
||||||
"interval": "1d",
|
"calendar_interval": "1d",
|
||||||
"format": "yyyy-MM-dd"
|
"format": "yyyy-MM-dd"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -310,7 +376,7 @@ setup:
|
||||||
"date": {
|
"date": {
|
||||||
"date_histogram": {
|
"date_histogram": {
|
||||||
"field": "date",
|
"field": "date",
|
||||||
"interval": "1d",
|
"calendar_interval": "1d",
|
||||||
"format": "yyyy-MM-dd"
|
"format": "yyyy-MM-dd"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -89,18 +89,25 @@ setup:
|
||||||
catch: /.*Trying to create too many buckets.*/
|
catch: /.*Trying to create too many buckets.*/
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
|
allow_partial_search_results: false
|
||||||
index: test
|
index: test
|
||||||
body:
|
body:
|
||||||
aggregations:
|
aggregations:
|
||||||
test:
|
test:
|
||||||
date_histogram:
|
terms:
|
||||||
field: date
|
field: keyword
|
||||||
interval: 1d
|
|
||||||
|
- do:
|
||||||
|
cluster.put_settings:
|
||||||
|
body:
|
||||||
|
transient:
|
||||||
|
search.max_buckets: 6
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
catch: /.*Trying to create too many buckets.*/
|
catch: /.*Trying to create too many buckets.*/
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
|
allow_partial_search_results: false
|
||||||
index: test
|
index: test
|
||||||
body:
|
body:
|
||||||
aggregations:
|
aggregations:
|
||||||
|
@ -109,25 +116,6 @@ setup:
|
||||||
field: keyword
|
field: keyword
|
||||||
aggs:
|
aggs:
|
||||||
2:
|
2:
|
||||||
date_histogram:
|
terms:
|
||||||
field: date
|
field: date
|
||||||
interval: 1d
|
|
||||||
|
|
||||||
- do:
|
|
||||||
cluster.put_settings:
|
|
||||||
body:
|
|
||||||
transient:
|
|
||||||
search.max_buckets: 100
|
|
||||||
|
|
||||||
- do:
|
|
||||||
catch: /.*Trying to create too many buckets.*/
|
|
||||||
search:
|
|
||||||
rest_total_hits_as_int: true
|
|
||||||
index: test
|
|
||||||
body:
|
|
||||||
aggregations:
|
|
||||||
test:
|
|
||||||
date_histogram:
|
|
||||||
field: date
|
|
||||||
interval: 1d
|
|
||||||
min_doc_count: 0
|
|
||||||
|
|
|
@ -6,8 +6,43 @@ setup:
|
||||||
---
|
---
|
||||||
"Bad window":
|
"Bad window":
|
||||||
|
|
||||||
|
- skip:
|
||||||
|
version: " - 7.1.99"
|
||||||
|
reason: "calendar_interval added in 7.2"
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
catch: /\[window\] must be a positive, non-zero integer\./
|
catch: /\[window\] must be a positive, non-zero integer\./
|
||||||
|
search:
|
||||||
|
rest_total_hits_as_int: true
|
||||||
|
body:
|
||||||
|
size: 0
|
||||||
|
aggs:
|
||||||
|
the_histo:
|
||||||
|
date_histogram:
|
||||||
|
field: "date"
|
||||||
|
calendar_interval: "1d"
|
||||||
|
aggs:
|
||||||
|
the_avg:
|
||||||
|
avg:
|
||||||
|
field: "value_field"
|
||||||
|
the_mov_fn:
|
||||||
|
moving_fn:
|
||||||
|
buckets_path: "the_avg"
|
||||||
|
window: -1
|
||||||
|
script: "MovingFunctions.windowMax(values)"
|
||||||
|
|
||||||
|
---
|
||||||
|
"Bad window deprecated interval":
|
||||||
|
|
||||||
|
- skip:
|
||||||
|
version: " - 7.1.99"
|
||||||
|
reason: "interval deprecation added in 7.2"
|
||||||
|
features: "warnings"
|
||||||
|
|
||||||
|
- do:
|
||||||
|
catch: /\[window\] must be a positive, non-zero integer\./
|
||||||
|
warnings:
|
||||||
|
- "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
body:
|
body:
|
||||||
|
@ -26,7 +61,6 @@ setup:
|
||||||
buckets_path: "the_avg"
|
buckets_path: "the_avg"
|
||||||
window: -1
|
window: -1
|
||||||
script: "MovingFunctions.windowMax(values)"
|
script: "MovingFunctions.windowMax(values)"
|
||||||
|
|
||||||
---
|
---
|
||||||
"Not under date_histo":
|
"Not under date_histo":
|
||||||
|
|
||||||
|
|
|
@ -206,12 +206,9 @@ setup:
|
||||||
---
|
---
|
||||||
"Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation":
|
"Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation":
|
||||||
- skip:
|
- skip:
|
||||||
features: warnings
|
version: " - 7.1.99"
|
||||||
version: " - 6.3.99"
|
reason: "calendar_interval added in 7.2"
|
||||||
reason: "deprecation added in 6.4.0"
|
|
||||||
- do:
|
- do:
|
||||||
warnings:
|
|
||||||
- 'The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.'
|
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
typed_keys: true
|
typed_keys: true
|
||||||
|
@ -221,13 +218,13 @@ setup:
|
||||||
test_created_histogram:
|
test_created_histogram:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: created
|
field: created
|
||||||
interval: month
|
calendar_interval: month
|
||||||
aggregations:
|
aggregations:
|
||||||
test_sum:
|
test_sum:
|
||||||
sum:
|
sum:
|
||||||
field: num
|
field: num
|
||||||
test_moving_avg:
|
test_deriv:
|
||||||
moving_avg:
|
derivative:
|
||||||
buckets_path: "test_sum"
|
buckets_path: "test_sum"
|
||||||
test_max_bucket:
|
test_max_bucket:
|
||||||
max_bucket:
|
max_bucket:
|
||||||
|
@ -236,5 +233,5 @@ setup:
|
||||||
- is_true: aggregations.date_histogram#test_created_histogram
|
- is_true: aggregations.date_histogram#test_created_histogram
|
||||||
- is_true: aggregations.date_histogram#test_created_histogram.buckets.0.sum#test_sum
|
- is_true: aggregations.date_histogram#test_created_histogram.buckets.0.sum#test_sum
|
||||||
- is_true: aggregations.date_histogram#test_created_histogram.buckets.1.sum#test_sum
|
- is_true: aggregations.date_histogram#test_created_histogram.buckets.1.sum#test_sum
|
||||||
- is_true: aggregations.date_histogram#test_created_histogram.buckets.1.simple_value#test_moving_avg
|
- is_true: aggregations.date_histogram#test_created_histogram.buckets.1.derivative#test_deriv
|
||||||
- is_true: aggregations.bucket_metric_value#test_max_bucket
|
- is_true: aggregations.bucket_metric_value#test_max_bucket
|
||||||
|
|
|
@ -124,6 +124,9 @@ setup:
|
||||||
|
|
||||||
---
|
---
|
||||||
"date histogram aggregation with date and date_nanos mapping":
|
"date histogram aggregation with date and date_nanos mapping":
|
||||||
|
- skip:
|
||||||
|
version: " - 7.1.99"
|
||||||
|
reason: calendar_interval introduced in 7.2.0
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
bulk:
|
bulk:
|
||||||
|
@ -148,7 +151,7 @@ setup:
|
||||||
date:
|
date:
|
||||||
date_histogram:
|
date_histogram:
|
||||||
field: date
|
field: date
|
||||||
interval: 1d
|
calendar_interval: 1d
|
||||||
|
|
||||||
- match: { hits.total: 4 }
|
- match: { hits.total: 4 }
|
||||||
- length: { aggregations.date.buckets: 2 }
|
- length: { aggregations.date.buckets: 2 }
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
9ac3dbf89dbf2ee385185dd0cd3064fe789efee0
|
|
|
@ -0,0 +1 @@
|
||||||
|
a079fc39ccc3de02acdeb7117443e5d9bd431687
|
|
@ -128,18 +128,18 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
||||||
public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
||||||
public static final int V_6_7_2_ID = 6070299;
|
public static final int V_6_7_2_ID = 6070299;
|
||||||
public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
||||||
public static final int V_6_7_3_ID = 6070399;
|
|
||||||
public static final Version V_6_7_3 = new Version(V_6_7_3_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
|
||||||
public static final int V_6_8_0_ID = 6080099;
|
public static final int V_6_8_0_ID = 6080099;
|
||||||
public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
||||||
|
public static final int V_6_8_1_ID = 6080199;
|
||||||
|
public static final Version V_6_8_1 = new Version(V_6_8_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
|
||||||
public static final int V_7_0_0_ID = 7000099;
|
public static final int V_7_0_0_ID = 7000099;
|
||||||
public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
||||||
public static final int V_7_0_1_ID = 7000199;
|
public static final int V_7_0_1_ID = 7000199;
|
||||||
public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
||||||
public static final int V_7_0_2_ID = 7000299;
|
|
||||||
public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
|
||||||
public static final int V_7_1_0_ID = 7010099;
|
public static final int V_7_1_0_ID = 7010099;
|
||||||
public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
||||||
|
public static final int V_7_1_1_ID = 7010199;
|
||||||
|
public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
||||||
public static final int V_7_2_0_ID = 7020099;
|
public static final int V_7_2_0_ID = 7020099;
|
||||||
public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
|
||||||
public static final Version CURRENT = V_7_2_0;
|
public static final Version CURRENT = V_7_2_0;
|
||||||
|
@ -157,18 +157,18 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
||||||
switch (id) {
|
switch (id) {
|
||||||
case V_7_2_0_ID:
|
case V_7_2_0_ID:
|
||||||
return V_7_2_0;
|
return V_7_2_0;
|
||||||
|
case V_7_1_1_ID:
|
||||||
|
return V_7_1_1;
|
||||||
case V_7_1_0_ID:
|
case V_7_1_0_ID:
|
||||||
return V_7_1_0;
|
return V_7_1_0;
|
||||||
case V_7_0_2_ID:
|
|
||||||
return V_7_0_2;
|
|
||||||
case V_7_0_1_ID:
|
case V_7_0_1_ID:
|
||||||
return V_7_0_1;
|
return V_7_0_1;
|
||||||
case V_7_0_0_ID:
|
case V_7_0_0_ID:
|
||||||
return V_7_0_0;
|
return V_7_0_0;
|
||||||
|
case V_6_8_1_ID:
|
||||||
|
return V_6_8_1;
|
||||||
case V_6_8_0_ID:
|
case V_6_8_0_ID:
|
||||||
return V_6_8_0;
|
return V_6_8_0;
|
||||||
case V_6_7_3_ID:
|
|
||||||
return V_6_7_3;
|
|
||||||
case V_6_7_1_ID:
|
case V_6_7_1_ID:
|
||||||
return V_6_7_1;
|
return V_6_7_1;
|
||||||
case V_6_7_2_ID:
|
case V_6_7_2_ID:
|
||||||
|
|
|
@ -203,18 +203,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
|
||||||
request.getTaskId().toString());
|
request.getTaskId().toString());
|
||||||
get.setParentTask(clusterService.localNode().getId(), thisTask.getId());
|
get.setParentTask(clusterService.localNode().getId(), thisTask.getId());
|
||||||
|
|
||||||
client.get(get, new ActionListener<GetResponse>() {
|
client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> {
|
||||||
@Override
|
|
||||||
public void onResponse(GetResponse getResponse) {
|
|
||||||
try {
|
|
||||||
onGetFinishedTaskFromIndex(getResponse, listener);
|
|
||||||
} catch (Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
|
if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
|
||||||
// We haven't yet created the index for the task results so it can't be found.
|
// We haven't yet created the index for the task results so it can't be found.
|
||||||
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
|
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
|
||||||
|
@ -222,8 +211,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
|
||||||
} else {
|
} else {
|
||||||
listener.onFailure(e);
|
listener.onFailure(e);
|
||||||
}
|
}
|
||||||
}
|
}));
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
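The refactor above collapses the anonymous listener into `ActionListener.wrap(onResponse, onFailure)`; an exception thrown by the response handler is delivered to the failure handler instead of escaping. A self-contained sketch of that behaviour, using made-up values only:

```java
import org.elasticsearch.action.ActionListener;

public class WrapPatternExample {
    public static void main(String[] args) {
        ActionListener<String> listener = ActionListener.wrap(
            value -> System.out.println("handled " + value),       // onResponse
            e -> System.err.println("failed: " + e.getMessage())); // onFailure, also invoked if
                                                                   // the response handler throws
        listener.onResponse("ok");
        listener.onFailure(new RuntimeException("boom"));
    }
}
```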
|
@ -119,23 +119,11 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
|
||||||
TransportNodesSnapshotsStatus.Request nodesRequest =
|
TransportNodesSnapshotsStatus.Request nodesRequest =
|
||||||
new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()]))
|
new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()]))
|
||||||
.snapshots(snapshots).timeout(request.masterNodeTimeout());
|
.snapshots(snapshots).timeout(request.masterNodeTimeout());
|
||||||
transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener<TransportNodesSnapshotsStatus.NodesSnapshotStatus>() {
|
transportNodesSnapshotsStatus.execute(nodesRequest,
|
||||||
@Override
|
ActionListener.map(
|
||||||
public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) {
|
listener, nodeSnapshotStatuses ->
|
||||||
try {
|
buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())),
|
||||||
List<SnapshotsInProgress.Entry> currentSnapshots =
|
nodeSnapshotStatuses)));
|
||||||
snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
|
|
||||||
listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses));
|
|
||||||
} catch (Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
} else {
|
} else {
|
||||||
// We don't have any in-progress shards, just return current stats
|
// We don't have any in-progress shards, just return current stats
|
||||||
listener.onResponse(buildResponse(request, currentSnapshots, null));
|
listener.onResponse(buildResponse(request, currentSnapshots, null));
|
||||||
|
|
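`ActionListener.map(delegate, fn)` used above returns a listener that converts the raw response with `fn` and forwards the result, or any conversion failure, to the delegate. A minimal sketch with placeholder types:

```java
import org.elasticsearch.action.ActionListener;

public class MapPatternExample {
    public static void main(String[] args) {
        ActionListener<Integer> lengthListener = ActionListener.wrap(
            len -> System.out.println("length = " + len),
            e -> System.err.println("failed: " + e.getMessage()));

        // The mapping function runs on each response before the delegate sees it.
        ActionListener<String> rawListener = ActionListener.map(lengthListener, String::length);
        rawListener.onResponse("snapshot-status"); // prints "length = 15"
    }
}
```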
|
@ -184,26 +184,13 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doExecute(Task task, UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
|
protected void doExecute(Task task, UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
|
||||||
ActionListener<UpgradeResponse> settingsUpdateListener = new ActionListener<UpgradeResponse>() {
|
super.doExecute(task, request, ActionListener.wrap(upgradeResponse -> {
|
||||||
@Override
|
|
||||||
public void onResponse(UpgradeResponse upgradeResponse) {
|
|
||||||
try {
|
|
||||||
if (upgradeResponse.versions().isEmpty()) {
|
if (upgradeResponse.versions().isEmpty()) {
|
||||||
listener.onResponse(upgradeResponse);
|
listener.onResponse(upgradeResponse);
|
||||||
} else {
|
} else {
|
||||||
updateSettings(upgradeResponse, listener);
|
updateSettings(upgradeResponse, listener);
|
||||||
}
|
}
|
||||||
} catch (Exception e) {
|
}, listener::onFailure));
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
super.doExecute(task, request, settingsUpdateListener);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener<UpgradeResponse> listener) {
|
private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener<UpgradeResponse> listener) {
|
||||||
|
|
|
@ -59,27 +59,20 @@ public final class BulkRequestHandler {
|
||||||
semaphore.acquire();
|
semaphore.acquire();
|
||||||
toRelease = semaphore::release;
|
toRelease = semaphore::release;
|
||||||
CountDownLatch latch = new CountDownLatch(1);
|
CountDownLatch latch = new CountDownLatch(1);
|
||||||
retry.withBackoff(consumer, bulkRequest, new ActionListener<BulkResponse>() {
|
retry.withBackoff(consumer, bulkRequest, ActionListener.runAfter(new ActionListener<BulkResponse>() {
|
||||||
@Override
|
@Override
|
||||||
public void onResponse(BulkResponse response) {
|
public void onResponse(BulkResponse response) {
|
||||||
try {
|
|
||||||
listener.afterBulk(executionId, bulkRequest, response);
|
listener.afterBulk(executionId, bulkRequest, response);
|
||||||
} finally {
|
|
||||||
semaphore.release();
|
|
||||||
latch.countDown();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void onFailure(Exception e) {
|
public void onFailure(Exception e) {
|
||||||
try {
|
|
||||||
listener.afterBulk(executionId, bulkRequest, e);
|
listener.afterBulk(executionId, bulkRequest, e);
|
||||||
} finally {
|
}
|
||||||
|
}, () -> {
|
||||||
semaphore.release();
|
semaphore.release();
|
||||||
latch.countDown();
|
latch.countDown();
|
||||||
}
|
}));
|
||||||
}
|
|
||||||
});
|
|
||||||
bulkRequestSetupSuccessful = true;
|
bulkRequestSetupSuccessful = true;
|
||||||
if (concurrentRequests == 0) {
|
if (concurrentRequests == 0) {
|
||||||
latch.await();
|
latch.await();
|
||||||
|
|
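`ActionListener.runAfter(delegate, runnable)` used above guarantees the cleanup runnable executes after either `onResponse` or `onFailure`, which is how the semaphore release and latch countdown are shared between both paths. A small sketch of the same shape, with invented request handling:

```java
import org.elasticsearch.action.ActionListener;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;

public class RunAfterPatternExample {
    public static void main(String[] args) throws InterruptedException {
        Semaphore semaphore = new Semaphore(1);
        CountDownLatch latch = new CountDownLatch(1);
        semaphore.acquire();

        ActionListener<String> listener = ActionListener.runAfter(
            ActionListener.wrap(
                r -> System.out.println("bulk finished: " + r),
                e -> System.err.println("bulk failed: " + e.getMessage())),
            () -> {               // runs after success or failure
                semaphore.release();
                latch.countDown();
            });

        listener.onResponse("ok");
        latch.await();
    }
}
```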
|
@ -22,7 +22,6 @@ package org.elasticsearch.action.ingest;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
|
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
|
||||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
|
|
||||||
import org.elasticsearch.action.support.ActionFilters;
|
import org.elasticsearch.action.support.ActionFilters;
|
||||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||||
|
@ -74,25 +73,13 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip
|
||||||
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
|
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
|
||||||
nodesInfoRequest.clear();
|
nodesInfoRequest.clear();
|
||||||
nodesInfoRequest.ingest(true);
|
nodesInfoRequest.ingest(true);
|
||||||
client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
|
client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> {
|
||||||
@Override
|
|
||||||
public void onResponse(NodesInfoResponse nodeInfos) {
|
|
||||||
try {
|
|
||||||
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
|
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
|
||||||
for (NodeInfo nodeInfo : nodeInfos.getNodes()) {
|
for (NodeInfo nodeInfo : nodeInfos.getNodes()) {
|
||||||
ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
|
ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
|
||||||
}
|
}
|
||||||
ingestService.putPipeline(ingestInfos, request, listener);
|
ingestService.putPipeline(ingestInfos, request, listener);
|
||||||
} catch (Exception e) {
|
}, listener::onFailure));
|
||||||
onFailure(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.action.support;
|
||||||
import org.apache.logging.log4j.Logger;
|
import org.apache.logging.log4j.Logger;
|
||||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
|
import org.elasticsearch.action.ActionRunnable;
|
||||||
import org.elasticsearch.client.Client;
|
import org.elasticsearch.client.Client;
|
||||||
import org.elasticsearch.client.transport.TransportClient;
|
import org.elasticsearch.client.transport.TransportClient;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
@ -86,21 +87,16 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void onResponse(final Response response) {
|
public void onResponse(final Response response) {
|
||||||
threadPool.executor(executor).execute(new AbstractRunnable() {
|
threadPool.executor(executor).execute(new ActionRunnable<Response>(listener) {
|
||||||
@Override
|
@Override
|
||||||
public boolean isForceExecution() {
|
public boolean isForceExecution() {
|
||||||
return forceExecution;
|
return forceExecution;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doRun() throws Exception {
|
protected void doRun() {
|
||||||
listener.onResponse(response);
|
listener.onResponse(response);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
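`ActionRunnable` wraps a listener so that an exception thrown from `doRun()`, or a rejected execution, is reported through `listener.onFailure` rather than lost on the executor thread. A hedged, stand-alone sketch of the dispatch pattern above; the executor and values are placeholders:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ActionRunnableExample {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        ActionListener<String> listener = ActionListener.wrap(
            r -> System.out.println("response: " + r),
            e -> System.err.println("failed: " + e.getMessage()));

        // Failures inside doRun() are routed to listener.onFailure by ActionRunnable.
        executor.execute(new ActionRunnable<String>(listener) {
            @Override
            protected void doRun() {
                listener.onResponse("computed on another thread");
            }
        });
        executor.shutdown();
    }
}
```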
|
@ -21,6 +21,7 @@ package org.elasticsearch.action.support.broadcast;
|
||||||
|
|
||||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
|
import org.elasticsearch.action.ActionRunnable;
|
||||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||||
import org.elasticsearch.action.support.ActionFilters;
|
import org.elasticsearch.action.support.ActionFilters;
|
||||||
import org.elasticsearch.action.support.HandledTransportAction;
|
import org.elasticsearch.action.support.HandledTransportAction;
|
||||||
|
@ -36,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
|
||||||
import org.elasticsearch.cluster.service.ClusterService;
|
import org.elasticsearch.cluster.service.ClusterService;
|
||||||
import org.elasticsearch.common.Nullable;
|
import org.elasticsearch.common.Nullable;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
|
||||||
import org.elasticsearch.tasks.Task;
|
import org.elasticsearch.tasks.Task;
|
||||||
import org.elasticsearch.threadpool.ThreadPool;
|
import org.elasticsearch.threadpool.ThreadPool;
|
||||||
import org.elasticsearch.transport.TransportChannel;
|
import org.elasticsearch.transport.TransportChannel;
|
||||||
|
@ -287,18 +287,8 @@ public abstract class TransportBroadcastAction<
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception {
|
public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception {
|
||||||
asyncShardOperation(request, task, new ActionListener<ShardResponse>() {
|
asyncShardOperation(request, task,
|
||||||
@Override
|
ActionListener.wrap(channel::sendResponse, e -> {
|
||||||
public void onResponse(ShardResponse response) {
|
|
||||||
try {
|
|
||||||
channel.sendResponse(response);
|
|
||||||
} catch (Exception e) {
|
|
||||||
onFailure(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
try {
|
try {
|
||||||
channel.sendResponse(e);
|
channel.sendResponse(e);
|
||||||
} catch (Exception e1) {
|
} catch (Exception e1) {
|
||||||
|
@ -306,26 +296,16 @@ public abstract class TransportBroadcastAction<
|
||||||
"Failed to send error response for action [{}] and request [{}]", actionName, request), e1);
|
"Failed to send error response for action [{}] and request [{}]", actionName, request), e1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) {
|
protected void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) {
|
||||||
transportService.getThreadPool().executor(getExecutor(request)).execute(new AbstractRunnable() {
|
transportService.getThreadPool().executor(shardExecutor).execute(new ActionRunnable<ShardResponse>(listener) {
|
||||||
@Override
|
|
||||||
public void onFailure(Exception e) {
|
|
||||||
listener.onFailure(e);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doRun() throws Exception {
|
protected void doRun() throws Exception {
|
||||||
listener.onResponse(shardOperation(request, task));
|
listener.onResponse(shardOperation(request, task));
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
protected String getExecutor(ShardRequest request) {
|
|
||||||
return shardExecutor;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -254,18 +254,8 @@ public abstract class TransportInstanceSingleOperationAction<

         @Override
         public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
-            shardOperation(request, new ActionListener<Response>() {
-                @Override
-                public void onResponse(Response response) {
-                    try {
-                        channel.sendResponse(response);
-                    } catch (Exception e) {
-                        onFailure(e);
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception e) {
+            shardOperation(request,
+                ActionListener.wrap(channel::sendResponse, e -> {
                     try {
                         channel.sendResponse(e);
                     } catch (Exception inner) {
@@ -273,8 +263,7 @@ public abstract class TransportInstanceSingleOperationAction<
                         logger.warn("failed to send response for get", inner);
                     }
                 }
-            });
-
+            ));
         }
     }
 }

@@ -22,6 +22,7 @@ package org.elasticsearch.action.support.single.shard;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ChannelActionListener;
@@ -40,7 +41,6 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
-import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -107,12 +107,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
     protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException;

     protected void asyncShardOperation(Request request, ShardId shardId, ActionListener<Response> listener) throws IOException {
-        threadPool.executor(getExecutor(request, shardId)).execute(new AbstractRunnable() {
-            @Override
-            public void onFailure(Exception e) {
-                listener.onFailure(e);
-            }
-
+        threadPool.executor(getExecutor(request, shardId)).execute(new ActionRunnable<Response>(listener) {
             @Override
             protected void doRun() throws Exception {
                 listener.onResponse(shardOperation(request, shardId));

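Aside, not part of the commit: ActionRunnable, used in the hunk above and in TransportBroadcastAction earlier, is an AbstractRunnable that already holds the listener and forwards failures to it, so call sites only override doRun(). A simplified, self-contained sketch of that idea, with stand-in types rather than the real Elasticsearch classes:

// Simplified stand-ins for ActionListener and ActionRunnable, for illustration only;
// the real types live in org.elasticsearch.action and org.elasticsearch.common.util.concurrent.
interface ResultListener<T> {
    void onResponse(T response);

    void onFailure(Exception e);
}

abstract class ListenerRunnable<T> implements Runnable {
    protected final ResultListener<T> listener;

    ListenerRunnable(ResultListener<T> listener) {
        this.listener = listener;
    }

    // Subclasses implement only the happy path; any failure is routed to the listener.
    // This is the onFailure boilerplate the hunk above deletes from each call site.
    protected abstract void doRun() throws Exception;

    @Override
    public final void run() {
        try {
            doRun();
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }
}

public class ActionRunnableDemo {
    public static void main(String[] args) {
        ResultListener<String> printListener = new ResultListener<String>() {
            @Override
            public void onResponse(String response) {
                System.out.println("response: " + response);
            }

            @Override
            public void onFailure(Exception e) {
                System.out.println("failure: " + e.getMessage());
            }
        };

        // Happy path: doRun produces a result and hands it to the listener.
        new ListenerRunnable<String>(printListener) {
            @Override
            protected void doRun() {
                listener.onResponse("shard result");
            }
        }.run();

        // Failure path: the exception is caught by run() and forwarded to onFailure.
        new ListenerRunnable<String>(printListener) {
            @Override
            protected void doRun() throws Exception {
                throw new Exception("shard operation failed");
            }
        }.run();
    }
}
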
@@ -329,19 +329,8 @@ public abstract class TransportTasksAction<

         @Override
         public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception {
-            nodeOperation(request, new ActionListener<NodeTasksResponse>() {
-                @Override
-                public void onResponse(
-                        TransportTasksAction<OperationTask, TasksRequest, TasksResponse, TaskResponse>.NodeTasksResponse response) {
-                    try {
-                        channel.sendResponse(response);
-                    } catch (Exception e) {
-                        onFailure(e);
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception e) {
+            nodeOperation(request, ActionListener.wrap(channel::sendResponse,
+                e -> {
                     try {
                         channel.sendResponse(e);
                     } catch (IOException e1) {
@@ -349,11 +338,10 @@ public abstract class TransportTasksAction<
                         logger.warn("Failed to send failure", e1);
                     }
                 }
-            });
+            ));
         }
     }

-
     private class NodeTaskRequest extends TransportRequest {
         private TasksRequest tasksRequest;

@@ -44,7 +44,7 @@ import java.util.Objects;
 public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
     private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class);
     protected final NamedXContentRegistry namedXContentRegistry;
-    static final String DELIMITER = "------------------------------------------------------------------------\n";
+    protected static final String DELIMITER = "------------------------------------------------------------------------\n";

     static final String STOP_WARNING_MSG =
         DELIMITER +
@@ -81,9 +81,8 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
                 throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG);
             }
             processNodePaths(terminal, dataPaths, env);
-        } catch (LockObtainFailedException ex) {
-            throw new ElasticsearchException(
-                FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]");
+        } catch (LockObtainFailedException e) {
+            throw new ElasticsearchException(FAILED_TO_OBTAIN_NODE_LOCK_MSG, e);
         }
     }

@@ -166,6 +165,18 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
         }
     }

+    protected NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) {
+        return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new);
+    }
+
+    private static NodeEnvironment.NodePath createNodePath(Path path) {
+        try {
+            return new NodeEnvironment.NodePath(path);
+        } catch (IOException e) {
+            throw new ElasticsearchException("Unable to investigate path [" + path + "]", e);
+        }
+    }
+
     //package-private for testing
     OptionParser getParser() {
         return parser;

@@ -22,6 +22,7 @@ import org.elasticsearch.cli.CommandLoggingConfigurator;
 import org.elasticsearch.cli.MultiCommand;
 import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.env.NodeRepurposeCommand;
+import org.elasticsearch.env.OverrideNodeVersionCommand;

 // NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging initialization
 // after LoggingAwareCommand instance is constructed.
@@ -39,6 +40,7 @@ public class NodeToolCli extends MultiCommand {
         subcommands.put("repurpose", new NodeRepurposeCommand());
         subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand());
         subcommands.put("detach-cluster", new DetachClusterCommand());
+        subcommands.put("override-version", new OverrideNodeVersionCommand());
     }

     public static void main(String[] args) throws Exception {

@@ -467,6 +467,11 @@ public class Setting<T> implements ToXContentObject {
      * @return the raw string representation of the setting value
      */
     String innerGetRaw(final Settings settings) {
+        SecureSettings secureSettings = settings.getSecureSettings();
+        if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) {
+            throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" +
+                " and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore");
+        }
         return settings.get(getKey(), defaultValue.apply(settings));
     }

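Aside, not part of the commit: the lines added to innerGetRaw reject a regular setting that was mistakenly placed in the Elasticsearch keystore instead of elasticsearch.yml. A minimal standalone sketch of that guard, using plain collections in place of the real Settings and SecureSettings types:

import java.util.Map;
import java.util.Set;

public class KeystoreGuardDemo {
    // Sketch of the check added to Setting#innerGetRaw: a regular (non-secure) setting
    // must come from elasticsearch.yml, not from the keystore.
    static String getRaw(String key, Map<String, String> yamlSettings, Set<String> keystoreKeys, String defaultValue) {
        if (keystoreKeys.contains(key)) {
            throw new IllegalArgumentException("Setting [" + key + "] is a non-secure setting" +
                " and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore");
        }
        return yamlSettings.getOrDefault(key, defaultValue);
    }

    public static void main(String[] args) {
        // Normal lookup from elasticsearch.yml.
        System.out.println(getRaw("cluster.name", Map.of("cluster.name", "prod"), Set.of(), "elasticsearch"));
        // Same setting placed in the keystore is rejected.
        try {
            getRaw("cluster.name", Map.of(), Set.of("cluster.name"), "elasticsearch");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
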
@@ -31,6 +31,7 @@ import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.NativeFSLockFactory;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.CheckedFunction;
@@ -250,7 +251,7 @@ public final class NodeEnvironment implements Closeable {
             sharedDataPath = null;
             locks = null;
             nodeLockId = -1;
-            nodeMetaData = new NodeMetaData(generateNodeId(settings));
+            nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT);
             return;
         }
         boolean success = false;
@@ -395,7 +396,6 @@ public final class NodeEnvironment implements Closeable {
         logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
     }

-
     /**
      * scans the node paths and loads existing metaData file. If not found a new meta data will be generated
      * and persisted into the nodePaths
@@ -405,10 +405,15 @@
         final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);
         NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths);
         if (metaData == null) {
-            metaData = new NodeMetaData(generateNodeId(settings));
+            metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT);
+        } else {
+            metaData = metaData.upgradeToCurrentVersion();
         }

         // we write again to make sure all paths have the latest state file
+        assert metaData.nodeVersion().equals(Version.CURRENT) : metaData.nodeVersion() + " != " + Version.CURRENT;
         NodeMetaData.FORMAT.writeAndCleanup(metaData, paths);

         return metaData;
     }

@@ -19,6 +19,7 @@

 package org.elasticsearch.env;

+import org.elasticsearch.Version;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -31,65 +32,103 @@ import java.io.OutputStream;
 import java.util.Objects;

 /**
- * Metadata associated with this node. Currently only contains the unique uuid describing this node.
+ * Metadata associated with this node: its persistent node ID and its version.
  * The metadata is persisted in the data folder of this node and is reused across restarts.
  */
 public final class NodeMetaData {

     private static final String NODE_ID_KEY = "node_id";
+    private static final String NODE_VERSION_KEY = "node_version";

     private final String nodeId;

-    public NodeMetaData(final String nodeId) {
+    private final Version nodeVersion;
+
+    public NodeMetaData(final String nodeId, final Version nodeVersion) {
         this.nodeId = Objects.requireNonNull(nodeId);
+        this.nodeVersion = Objects.requireNonNull(nodeVersion);
     }

     @Override
     public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;

         NodeMetaData that = (NodeMetaData) o;
-        return Objects.equals(this.nodeId, that.nodeId);
+        return nodeId.equals(that.nodeId) &&
+            nodeVersion.equals(that.nodeVersion);
     }

     @Override
     public int hashCode() {
-        return this.nodeId.hashCode();
+        return Objects.hash(nodeId, nodeVersion);
     }

     @Override
     public String toString() {
-        return "node_id [" + nodeId + "]";
+        return "NodeMetaData{" +
+            "nodeId='" + nodeId + '\'' +
+            ", nodeVersion=" + nodeVersion +
+            '}';
     }

     private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>("node_meta_data", Builder::new);

     static {
         PARSER.declareString(Builder::setNodeId, new ParseField(NODE_ID_KEY));
+        PARSER.declareInt(Builder::setNodeVersionId, new ParseField(NODE_VERSION_KEY));
     }

     public String nodeId() {
         return nodeId;
     }

+    public Version nodeVersion() {
+        return nodeVersion;
+    }
+
+    public NodeMetaData upgradeToCurrentVersion() {
+        if (nodeVersion.equals(Version.V_EMPTY)) {
+            assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards";
+            return new NodeMetaData(nodeId, Version.CURRENT);
+        }
+
+        if (nodeVersion.before(Version.CURRENT.minimumIndexCompatibilityVersion())) {
+            throw new IllegalStateException(
+                "cannot upgrade a node from version [" + nodeVersion + "] directly to version [" + Version.CURRENT + "]");
+        }
+
+        if (nodeVersion.after(Version.CURRENT)) {
+            throw new IllegalStateException(
+                "cannot downgrade a node from version [" + nodeVersion + "] to version [" + Version.CURRENT + "]");
+        }
+
+        return nodeVersion.equals(Version.CURRENT) ? this : new NodeMetaData(nodeId, Version.CURRENT);
+    }
+
     private static class Builder {
         String nodeId;
+        Version nodeVersion;

         public void setNodeId(String nodeId) {
             this.nodeId = nodeId;
         }

-        public NodeMetaData build() {
-            return new NodeMetaData(nodeId);
+        public void setNodeVersionId(int nodeVersionId) {
+            this.nodeVersion = Version.fromId(nodeVersionId);
         }
+
+        public NodeMetaData build() {
+            final Version nodeVersion;
+            if (this.nodeVersion == null) {
+                assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards";
+                nodeVersion = Version.V_EMPTY;
+            } else {
+                nodeVersion = this.nodeVersion;
+            }
+
+            return new NodeMetaData(nodeId, nodeVersion);
+        }
     }

     public static final MetaDataStateFormat<NodeMetaData> FORMAT = new MetaDataStateFormat<NodeMetaData>("node-") {

@@ -103,10 +142,11 @@ public final class NodeMetaData {
         @Override
         public void toXContent(XContentBuilder builder, NodeMetaData nodeMetaData) throws IOException {
             builder.field(NODE_ID_KEY, nodeMetaData.nodeId);
+            builder.field(NODE_VERSION_KEY, nodeMetaData.nodeVersion.id);
         }

         @Override
-        public NodeMetaData fromXContent(XContentParser parser) throws IOException {
+        public NodeMetaData fromXContent(XContentParser parser) {
             return PARSER.apply(parser, null).build();
         }
     };

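Aside, not part of the commit: upgradeToCurrentVersion above accepts metadata with no recorded version (written before the version field existed), refuses versions older than the minimum index-compatible version or newer than the running version, and otherwise re-stamps the metadata with Version.CURRENT. A simplified standalone model of that decision logic, using integer ids and illustrative bounds in place of org.elasticsearch.Version:

public class NodeVersionUpgradeDemo {
    // Simplified model of NodeMetaData#upgradeToCurrentVersion: integer ids stand in for
    // org.elasticsearch.Version, and the bounds below are illustrative values, not real ones.
    static final int CURRENT = 7_02_00;
    static final int MIN_INDEX_COMPATIBLE = 6_00_00; // oldest on-disk version CURRENT can read (assumed)
    static final int V_EMPTY = 0;                    // metadata written before the version field existed

    static int upgradeToCurrentVersion(int nodeVersion) {
        if (nodeVersion == V_EMPTY) {
            // older metadata carried no version; treat it as upgradable and stamp CURRENT
            return CURRENT;
        }
        if (nodeVersion < MIN_INDEX_COMPATIBLE) {
            throw new IllegalStateException("cannot upgrade a node from version [" + nodeVersion
                + "] directly to version [" + CURRENT + "]");
        }
        if (nodeVersion > CURRENT) {
            throw new IllegalStateException("cannot downgrade a node from version [" + nodeVersion
                + "] to version [" + CURRENT + "]");
        }
        return CURRENT; // in range: keep the node id, bump the recorded version
    }

    public static void main(String[] args) {
        System.out.println(upgradeToCurrentVersion(V_EMPTY));  // stamped with CURRENT
        System.out.println(upgradeToCurrentVersion(7_01_00));  // normal upgrade within range
        try {
            upgradeToCurrentVersion(8_00_00);                  // newer than CURRENT: refuse
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}
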
@@ -172,10 +172,6 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand {
         }
     }

-    private NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) {
-        return Arrays.stream(dataPaths).map(NodeRepurposeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new);
-    }
-
     private Set<String> indexUUIDsFor(Set<Path> indexPaths) {
         return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet());
     }
@@ -226,14 +222,6 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand {
         return Arrays.stream(paths).flatMap(Collection::stream).map(Path::getParent).collect(Collectors.toSet());
     }

-    private static NodeEnvironment.NodePath createNodePath(Path path) {
-        try {
-            return new NodeEnvironment.NodePath(path);
-        } catch (IOException e) {
-            throw new ElasticsearchException("Unable to investigate path: " + path + ": " + e.getMessage());
-        }
-    }
-
     //package-private for testing
     OptionParser getParser() {
         return parser;

@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.env;
+
+import joptsimple.OptionParser;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Arrays;
+
+public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand {
+    private static final Logger logger = LogManager.getLogger(OverrideNodeVersionCommand.class);
+
+    private static final String TOO_NEW_MESSAGE =
+        DELIMITER +
+            "\n" +
+            "This data path was last written by Elasticsearch version [V_NEW] and may no\n" +
+            "longer be compatible with Elasticsearch version [V_CUR]. This tool will bypass\n" +
+            "this compatibility check, allowing a version [V_CUR] node to start on this data\n" +
+            "path, but a version [V_CUR] node may not be able to read this data or may read\n" +
+            "it incorrectly leading to data loss.\n" +
+            "\n" +
+            "You should not use this tool. Instead, continue to use a version [V_NEW] node\n" +
+            "on this data path. If necessary, you can use reindex-from-remote to copy the\n" +
+            "data from here into an older cluster.\n" +
+            "\n" +
+            "Do you want to proceed?\n";
+
+    private static final String TOO_OLD_MESSAGE =
+        DELIMITER +
+            "\n" +
+            "This data path was last written by Elasticsearch version [V_OLD] which may be\n" +
+            "too old to be readable by Elasticsearch version [V_CUR]. This tool will bypass\n" +
+            "this compatibility check, allowing a version [V_CUR] node to start on this data\n" +
+            "path, but this version [V_CUR] node may not be able to read this data or may\n" +
+            "read it incorrectly leading to data loss.\n" +
+            "\n" +
+            "You should not use this tool. Instead, upgrade this data path from [V_OLD] to\n" +
+            "[V_CUR] using one or more intermediate versions of Elasticsearch.\n" +
+            "\n" +
+            "Do you want to proceed?\n";
+
+    static final String NO_METADATA_MESSAGE = "no node metadata found, so there is no version to override";
+    static final String SUCCESS_MESSAGE = "Successfully overwrote this node's metadata to bypass its version compatibility checks.";
+
+    public OverrideNodeVersionCommand() {
+        super("Overwrite the version stored in this node's data path with [" + Version.CURRENT +
+            "] to bypass the version compatibility checks");
+    }
+
+    @Override
+    protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException {
+        final Path[] nodePaths = Arrays.stream(toNodePaths(dataPaths)).map(p -> p.path).toArray(Path[]::new);
+        final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodePaths);
+        if (nodeMetaData == null) {
+            throw new ElasticsearchException(NO_METADATA_MESSAGE);
+        }
+
+        try {
+            nodeMetaData.upgradeToCurrentVersion();
+            throw new ElasticsearchException("found [" + nodeMetaData + "] which is compatible with current version [" + Version.CURRENT
+                + "], so there is no need to override the version checks");
+        } catch (IllegalStateException e) {
+            // ok, means the version change is not supported
+        }
+
+        confirm(terminal, (nodeMetaData.nodeVersion().before(Version.CURRENT) ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE)
+            .replace("V_OLD", nodeMetaData.nodeVersion().toString())
+            .replace("V_NEW", nodeMetaData.nodeVersion().toString())
+            .replace("V_CUR", Version.CURRENT.toString()));
+
+        NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeMetaData.nodeId(), Version.CURRENT), nodePaths);
+
+        terminal.println(SUCCESS_MESSAGE);
+    }
+
+    //package-private for testing
+    OptionParser getParser() {
+        return parser;
+    }
+}

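Aside, not part of the commit: the new command, registered above in NodeToolCli as the override-version subcommand, loads the on-disk NodeMetaData, proceeds only when upgradeToCurrentVersion() would refuse the version change, asks for confirmation, and then rewrites the metadata with Version.CURRENT. A condensed, self-contained model of that control flow; the types, prompts, and version ids below are illustrative, not the real implementation:

import java.util.Scanner;

public class OverrideVersionFlowDemo {
    // Condensed model of the command's control flow; the real class reads NodeMetaData from the
    // data path and is run through the node CLI. Version ids and prompt text are illustrative.
    static void overrideVersion(Integer onDiskVersion, int currentVersion, Scanner in) {
        if (onDiskVersion == null) {
            throw new IllegalStateException("no node metadata found, so there is no version to override");
        }
        if (onDiskVersion == currentVersion) {
            // The real command only proceeds when upgradeToCurrentVersion() throws, i.e. when the
            // normal upgrade/downgrade checks would refuse this data path.
            throw new IllegalStateException("metadata is already compatible; nothing to override");
        }
        String warning = onDiskVersion < currentVersion
            ? "This data path was last written by version [" + onDiskVersion + "], which may be too old."
            : "This data path was last written by version [" + onDiskVersion + "], which is newer than [" + currentVersion + "].";
        System.out.println(warning + " Do you want to proceed? [y/N]");
        if (!in.nextLine().trim().equalsIgnoreCase("y")) {
            throw new IllegalStateException("aborted by user");
        }
        // At this point the real command rewrites the node metadata with Version.CURRENT.
        System.out.println("Successfully overwrote this node's metadata to bypass its version compatibility checks.");
    }

    public static void main(String[] args) {
        overrideVersion(6_05_00, 7_02_00, new Scanner("y\n"));
    }
}
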
Some files were not shown because too many files have changed in this diff.