Merge branch 'master' into ccr

Nhat Nguyen 2018-07-13 16:55:57 -04:00
commit e26f3e0c26
165 changed files with 6458 additions and 1384 deletions


@ -0,0 +1,105 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.indices.breaker;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.util.concurrent.TimeUnit;
@Fork(3)
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@SuppressWarnings("unused") //invoked by benchmarking framework
public class MemoryStatsBenchmark {
private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean();
@Param({"0", "16", "256", "4096"})
private int tokens;
@Benchmark
public void baseline() {
Blackhole.consumeCPU(tokens);
}
@Benchmark
@Threads(1)
public long getMemoryStats_01() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
@Benchmark
@Threads(2)
public long getMemoryStats_02() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
@Benchmark
@Threads(4)
public long getMemoryStats_04() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
@Benchmark
@Threads(8)
public long getMemoryStats_08() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
@Benchmark
@Threads(16)
public long getMemoryStats_16() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
@Benchmark
@Threads(32)
public long getMemoryStats_32() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
@Benchmark
@Threads(64)
public long getMemoryStats_64() {
Blackhole.consumeCPU(tokens);
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
}
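
For context, the benchmark above isolates the cost of reading heap usage through `MemoryMXBean`, which is what the real-memory parent circuit breaker described later in this commit relies on. A minimal standalone sketch of the measured call (class name and output are illustrative, not part of this commit):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;

public class HeapUsageProbe {
    public static void main(String[] args) {
        // The same call the benchmark exercises under increasing thread counts.
        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
        long usedHeapBytes = memoryMXBean.getHeapMemoryUsage().getUsed();
        System.out.println("Used heap: " + usedHeapBytes + " bytes");
    }
}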


@ -1,41 +0,0 @@
package org.elasticsearch.gradle;
import groovy.lang.Closure;
import org.gradle.api.GradleException;
import org.gradle.api.Task;
import org.gradle.api.tasks.Exec;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.stream.Collectors;
/**
* A wrapper around gradle's Exec task to capture output and log on error.
*/
public class LoggedExec extends Exec {
protected ByteArrayOutputStream output = new ByteArrayOutputStream();
public LoggedExec() {
if (getLogger().isInfoEnabled() == false) {
setStandardOutput(output);
setErrorOutput(output);
setIgnoreExitValue(true);
doLast(new Closure<Void>(this, this) {
public void doCall(Task it) throws IOException {
if (getExecResult().getExitValue() != 0) {
for (String line : output.toString("UTF-8").split("\\R")) {
getLogger().error(line);
}
throw new GradleException(
"Process \'" + getExecutable() + " " +
getArgs().stream().collect(Collectors.joining(" "))+
"\' finished with non-zero exit value " +
String.valueOf(getExecResult().getExitValue())
);
}
}
});
}
}
}


@ -1,147 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle
import groovy.transform.Sortable
import java.util.regex.Matcher
import org.gradle.api.InvalidUserDataException
/**
* Encapsulates comparison and printing logic for an x.y.z version.
*/
@Sortable(includes=['id'])
public class Version {
final int major
final int minor
final int revision
final int id
final boolean snapshot
/**
* Suffix on the version name.
*/
final String suffix
public Version(int major, int minor, int revision,
String suffix, boolean snapshot) {
this.major = major
this.minor = minor
this.revision = revision
this.snapshot = snapshot
this.suffix = suffix
int suffixOffset = 0
if (suffix.contains("alpha")) {
suffixOffset += Integer.parseInt(suffix.substring(6))
} else if (suffix.contains("beta")) {
suffixOffset += 25 + Integer.parseInt(suffix.substring(5))
} else if (suffix.contains("rc")) {
suffixOffset += 50 + Integer.parseInt(suffix.substring(3));
}
this.id = major * 1000000 + minor * 10000 + revision * 100 + suffixOffset
}
public static Version fromString(String s) {
Matcher m = s =~ /(\d+)\.(\d+)\.(\d+)(-alpha\d+|-beta\d+|-rc\d+)?(-SNAPSHOT)?/
if (m.matches() == false) {
throw new InvalidUserDataException("Invalid version [${s}]")
}
return new Version(m.group(1) as int, m.group(2) as int,
m.group(3) as int, m.group(4) ?: '', m.group(5) != null)
}
@Override
public String toString() {
String snapshotStr = snapshot ? '-SNAPSHOT' : ''
return "${major}.${minor}.${revision}${suffix}${snapshotStr}"
}
public boolean before(Version compareTo) {
return id < compareTo.id
}
public boolean before(String compareTo) {
return before(fromString(compareTo))
}
public boolean onOrBefore(Version compareTo) {
return id <= compareTo.id
}
public boolean onOrBefore(String compareTo) {
return onOrBefore(fromString(compareTo))
}
public boolean onOrAfter(Version compareTo) {
return id >= compareTo.id
}
public boolean onOrAfter(String compareTo) {
return onOrAfter(fromString(compareTo))
}
public boolean after(Version compareTo) {
return id > compareTo.id
}
public boolean after(String compareTo) {
return after(fromString(compareTo))
}
public boolean onOrBeforeIncludingSuffix(Version otherVersion) {
if (id != otherVersion.id) {
return id < otherVersion.id
}
if (suffix == '') {
return otherVersion.suffix == ''
}
return otherVersion.suffix == '' || suffix < otherVersion.suffix
}
boolean equals(o) {
if (this.is(o)) return true
if (getClass() != o.class) return false
Version version = (Version) o
if (id != version.id) return false
if (major != version.major) return false
if (minor != version.minor) return false
if (revision != version.revision) return false
if (snapshot != version.snapshot) return false
if (suffix != version.suffix) return false
return true
}
int hashCode() {
int result
result = major
result = 31 * result + minor
result = 31 * result + revision
result = 31 * result + id
result = 31 * result + (snapshot ? 1 : 0)
result = 31 * result + (suffix != null ? suffix.hashCode() : 0)
return result
}
}


@ -0,0 +1,44 @@
package org.elasticsearch.gradle;
import org.gradle.api.GradleException;
import org.gradle.api.tasks.Exec;
import java.io.ByteArrayOutputStream;
import java.io.UnsupportedEncodingException;
/**
* A wrapper around gradle's Exec task to capture output and log on error.
*/
@SuppressWarnings("unchecked")
public class LoggedExec extends Exec {
protected ByteArrayOutputStream output = new ByteArrayOutputStream();
public LoggedExec() {
if (getLogger().isInfoEnabled() == false) {
setStandardOutput(output);
setErrorOutput(output);
setIgnoreExitValue(true);
doLast((unused) -> {
if (getExecResult().getExitValue() != 0) {
try {
for (String line : output.toString("UTF-8").split("\\R")) {
getLogger().error(line);
}
} catch (UnsupportedEncodingException e) {
throw new GradleException("Failed to read exec output", e);
}
throw new GradleException(
String.format(
"Process '%s %s' finished with non-zero exit value %d",
getExecutable(),
getArgs(),
getExecResult().getExitValue()
)
);
}
}
);
}
}
}
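
A hedged sketch of how a build plugin might register a task of this type; the plugin class, task name, and command below are illustrative and not part of this commit:

import org.elasticsearch.gradle.LoggedExec;
import org.gradle.api.Plugin;
import org.gradle.api.Project;

public class LoggedExecExamplePlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        project.getTasks().create("printGitRevision", LoggedExec.class, task -> {
            // Output is buffered by LoggedExec and only written to the error log
            // if the process exits with a non-zero value (unless --info is enabled).
            task.commandLine("git", "rev-parse", "HEAD");
        });
    }
}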


@ -0,0 +1,179 @@
package org.elasticsearch.gradle;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Encapsulates comparison and printing logic for an x.y.z version.
*/
public final class Version implements Comparable<Version> {
private final int major;
private final int minor;
private final int revision;
private final int id;
private final boolean snapshot;
/**
* Suffix on the version name.
*/
private final String suffix;
private static final Pattern pattern =
Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?");
public Version(int major, int minor, int revision, String suffix, boolean snapshot) {
Objects.requireNonNull(major, "major version can't be null");
Objects.requireNonNull(minor, "minor version can't be null");
Objects.requireNonNull(revision, "revision version can't be null");
this.major = major;
this.minor = minor;
this.revision = revision;
this.snapshot = snapshot;
this.suffix = suffix == null ? "" : suffix;
int suffixOffset = 0;
if (this.suffix.isEmpty()) {
// no suffix will be considered smaller, uncomment to change that
// suffixOffset = 100;
} else {
if (this.suffix.contains("alpha")) {
suffixOffset += parseSuffixNumber(this.suffix.substring(6));
} else if (this.suffix.contains("beta")) {
suffixOffset += 25 + parseSuffixNumber(this.suffix.substring(5));
} else if (this.suffix.contains("rc")) {
suffixOffset += 50 + parseSuffixNumber(this.suffix.substring(3));
}
else {
throw new IllegalArgumentException("Suffix must contain one of: alpha, beta or rc");
}
}
// currently snapshot is not taken into account
this.id = major * 10000000 + minor * 100000 + revision * 1000 + suffixOffset * 10 /*+ (snapshot ? 1 : 0)*/;
}
private static int parseSuffixNumber(String substring) {
if (substring.isEmpty()) {
throw new IllegalArgumentException("Invalid suffix, must contain a number e.x. alpha2");
}
return Integer.parseInt(substring);
}
public static Version fromString(final String s) {
Objects.requireNonNull(s);
Matcher matcher = pattern.matcher(s);
if (matcher.matches() == false) {
throw new IllegalArgumentException(
"Invalid version format: '" + s + "'. Should be major.minor.revision[-(alpha|beta|rc)Number][-SNAPSHOT]"
);
}
return new Version(
Integer.parseInt(matcher.group(1)),
parseSuffixNumber(matcher.group(2)),
parseSuffixNumber(matcher.group(3)),
matcher.group(4),
matcher.group(5) != null
);
}
@Override
public String toString() {
final String snapshotStr = snapshot ? "-SNAPSHOT" : "";
return String.valueOf(getMajor()) + "." + String.valueOf(getMinor()) + "." + String.valueOf(getRevision()) +
(suffix == null ? "" : suffix) + snapshotStr;
}
public boolean before(Version compareTo) {
return id < compareTo.getId();
}
public boolean before(String compareTo) {
return before(fromString(compareTo));
}
public boolean onOrBefore(Version compareTo) {
return id <= compareTo.getId();
}
public boolean onOrBefore(String compareTo) {
return onOrBefore(fromString(compareTo));
}
public boolean onOrAfter(Version compareTo) {
return id >= compareTo.getId();
}
public boolean onOrAfter(String compareTo) {
return onOrAfter(fromString(compareTo));
}
public boolean after(Version compareTo) {
return id > compareTo.getId();
}
public boolean after(String compareTo) {
return after(fromString(compareTo));
}
public boolean onOrBeforeIncludingSuffix(Version otherVersion) {
if (id != otherVersion.getId()) {
return id < otherVersion.getId();
}
if (suffix.equals("")) {
return otherVersion.getSuffix().equals("");
}
return otherVersion.getSuffix().equals("") || suffix.compareTo(otherVersion.getSuffix()) < 0;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Version version = (Version) o;
return major == version.major &&
minor == version.minor &&
revision == version.revision &&
id == version.id &&
snapshot == version.snapshot &&
Objects.equals(suffix, version.suffix);
}
@Override
public int hashCode() {
return Objects.hash(major, minor, revision, id, snapshot, suffix);
}
public int getMajor() {
return major;
}
public int getMinor() {
return minor;
}
public int getRevision() {
return revision;
}
protected int getId() {
return id;
}
public boolean isSnapshot() {
return snapshot;
}
public String getSuffix() {
return suffix;
}
@Override
public int compareTo(Version other) {
return Integer.compare(getId(), other.getId());
}
}
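
A small usage sketch (not part of this commit) showing how the integer `id` encoding above orders versions; the printed results follow directly from the constructor's suffix offsets:

import org.elasticsearch.gradle.Version;

public class VersionOrderingExample {
    public static void main(String[] args) {
        Version alpha = Version.fromString("7.0.0-alpha1");
        Version beta = Version.fromString("7.0.0-beta1");
        Version release = Version.fromString("7.0.0");

        System.out.println(alpha.before(beta));          // true: alpha offsets rank below beta offsets
        System.out.println(release.before(beta));        // true: no suffix is considered smaller
        System.out.println(release.onOrAfter("6.3.0"));  // true: major/minor/revision dominate the id
    }
}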


@ -1,7 +1,6 @@
package org.elasticsearch.gradle.precommit;
import groovy.lang.Closure;
import org.codehaus.groovy.runtime.ResourceGroovyMethods;
import org.elasticsearch.gradle.LoggedExec;
import org.elasticsearch.test.NamingConventionsCheck;
import org.gradle.api.GradleException;
@ -10,12 +9,12 @@ import org.gradle.api.Task;
import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.tasks.AbstractExecTask;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.SourceSetContainer;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Objects;
@ -24,6 +23,7 @@ import java.util.Objects;
* tests are named according to our conventions so they'll be picked up by
* gradle. Read the Javadoc for NamingConventionsCheck to learn more.
*/
@SuppressWarnings("unchecked")
public class NamingConventionsTask extends LoggedExec {
public NamingConventionsTask() {
setDescription("Tests that test classes aren't misnamed or misplaced");
@ -31,23 +31,23 @@ public class NamingConventionsTask extends LoggedExec {
SourceSetContainer sourceSets = getJavaSourceSets();
final FileCollection classpath = project.files(
// This works because the class only depends on one class from junit that will be available from the
// tests compile classpath. It's the most straight forward way of telling Java where to find the main
// class.
NamingConventionsCheck.class.getProtectionDomain().getCodeSource().getLocation().getPath(),
// the tests to be loaded
checkForTestsInMain ? sourceSets.getByName("main").getRuntimeClasspath() : project.files(),
sourceSets.getByName("test").getCompileClasspath(),
sourceSets.getByName("test").getOutput()
);
dependsOn(project.getTasks().matching(it -> "testCompileClasspath".equals(it.getName())));
getInputs().files(classpath);
setExecutable(new File(
Objects.requireNonNull(
project.getExtensions().getByType(ExtraPropertiesExtension.class).get("runtimeJavaHome")
).toString(),
"bin/java")
);
if (checkForTestsInMain == false) {
@ -61,36 +61,34 @@ public class NamingConventionsTask extends LoggedExec {
* We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be
* ready for us. Strangely neither one on their own are good enough.
*/
project.afterEvaluate(new Closure<Task>(this, this) {
public Task doCall(Project it) {
return doFirst(new Closure<AbstractExecTask>(NamingConventionsTask.this, NamingConventionsTask.this) {
public AbstractExecTask doCall(Task it) {
args("-Djna.nosys=true");
args("-cp", classpath.getAsPath(), "org.elasticsearch.test.NamingConventionsCheck");
args("--test-class", getTestClass());
if (skipIntegTestInDisguise) {
args("--skip-integ-tests-in-disguise");
} else {
args("--integ-test-class", getIntegTestClass());
}
if (getCheckForTestsInMain()) {
args("--main");
args("--");
} else {
args("--");
}
return args(getExistingClassesDirs().getAsPath());
project.afterEvaluate(new Closure<Void>(this, this) {
public void doCall(Project it) {
doFirst(unused -> {
args("-Djna.nosys=true");
args("-cp", classpath.getAsPath(), "org.elasticsearch.test.NamingConventionsCheck");
args("--test-class", getTestClass());
if (skipIntegTestInDisguise) {
args("--skip-integ-tests-in-disguise");
} else {
args("--integ-test-class", getIntegTestClass());
}
if (getCheckForTestsInMain()) {
args("--main");
args("--");
} else {
args("--");
}
args(getExistingClassesDirs().getAsPath());
});
}
});
doLast(new Closure<Object>(this, this) {
public void doCall(Task it) {
try {
ResourceGroovyMethods.setText(getSuccessMarker(), "", "UTF-8");
} catch (IOException e) {
throw new GradleException("io exception", e);
doLast((Task it) -> {
try {
try (FileWriter fw = new FileWriter(getSuccessMarker())) {
fw.write("");
}
} catch (IOException e) {
throw new GradleException("io exception", e);
}
});
}
@ -101,7 +99,7 @@ public class NamingConventionsTask extends LoggedExec {
public FileCollection getExistingClassesDirs() {
FileCollection classesDirs = getJavaSourceSets().getByName(checkForTestsInMain ? "main" : "test")
.getOutput().getClassesDirs();
return classesDirs.filter(it -> it.exists());
}


@ -69,6 +69,10 @@ public class NamingConventionsCheck {
fail("unsupported argument '" + arg + "'");
}
}
if (rootPathList == null) {
fail("No paths provided");
return;
}
NamingConventionsCheck check = new NamingConventionsCheck(testClass, integTestClass);
for (String rootDir : rootPathList.split(Pattern.quote(File.pathSeparator))) {


@ -8,7 +8,7 @@ class VersionCollectionTests extends GradleUnitTestCase {
String formatVersion(String version) {
return " public static final Version V_${version.replaceAll("\\.", "_")} "
}
def allVersions = [formatVersion('5.0.0'), formatVersion('5.0.0_alpha1'), formatVersion('5.0.0_alpha2'), formatVersion('5.0.0_beta1'),
List<String> allVersions = [formatVersion('5.0.0'), formatVersion('5.0.0_alpha1'), formatVersion('5.0.0_alpha2'), formatVersion('5.0.0_beta1'),
formatVersion('5.0.0_rc1'),formatVersion('5.0.0_rc2'),formatVersion('5.0.1'), formatVersion('5.0.2'),
formatVersion('5.1.1'), formatVersion('5.1.2'), formatVersion('5.2.0'), formatVersion('5.2.1'), formatVersion('6.0.0'),
formatVersion('6.0.1'), formatVersion('6.1.0'), formatVersion('6.1.1'), formatVersion('6.2.0'), formatVersion('6.3.0'),
@ -223,7 +223,8 @@ class VersionCollectionTests extends GradleUnitTestCase {
Version.fromString("5.1.1"), Version.fromString("5.2.0"), Version.fromString("5.2.1"),
Version.fromString("5.3.0"), Version.fromString("5.3.1")]
assertTrue(wireCompatList.containsAll(vc.wireCompatible))
List<Version> compatible = vc.wireCompatible
assertTrue(wireCompatList.containsAll(compatible))
assertTrue(vc.wireCompatible.containsAll(wireCompatList))
assertEquals(vc.snapshotsIndexCompatible.size(), 1)


@ -0,0 +1,181 @@
package org.elasticsearch.gradle;
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.test.GradleUnitTestCase;
import org.junit.Rule;
import org.junit.rules.ExpectedException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class VersionTests extends GradleUnitTestCase {
@Rule
public ExpectedException expectedEx = ExpectedException.none();
public void testVersionParsing() {
assertVersionEquals("7.0.1", 7, 0, 1, "", false);
assertVersionEquals("7.0.1-alpha2", 7, 0, 1, "-alpha2", false);
assertVersionEquals("5.1.2-rc3", 5, 1, 2, "-rc3", false);
assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2, "", true);
assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2, "-beta1", true);
}
public void testCompareWithStringVersions() {
assertTrue("1.10.20 is not interpreted as before 2.0.0",
Version.fromString("1.10.20").before("2.0.0")
);
assertTrue("7.0.0-alpha1 is not interpreted as before 7.0.0-alpha2",
Version.fromString("7.0.0-alpha1").before("7.0.0-alpha2")
);
assertTrue("7.0.0-alpha1 should be equal to 7.0.0-alpha1",
Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1"))
);
assertTrue("7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT",
Version.fromString("7.0.0-SNAPSHOT").equals(Version.fromString("7.0.0-SNAPSHOT"))
);
assertEquals(Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("5.2.1-SNAPSHOT"));
}
public void testCollections() {
assertTrue(
Arrays.asList(
Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"),
Version.fromString("6.0.1"), Version.fromString("6.1.0")
).containsAll(Arrays.asList(
Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT")
))
);
Set<Version> versions = new HashSet<>();
versions.addAll(Arrays.asList(
Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"),
Version.fromString("6.0.1"), Version.fromString("6.1.0")
));
Set<Version> subset = new HashSet<>();
subset.addAll(Arrays.asList(
Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT")
));
assertTrue(versions.containsAll(subset));
}
public void testToString() {
assertEquals("7.0.1", new Version(7, 0, 1, null, false).toString());
}
public void testCompareVersions() {
assertEquals(0, new Version(7, 0, 0, null, true).compareTo(
new Version(7, 0, 0, null, true)
));
assertEquals(0, new Version(7, 0, 0, null, true).compareTo(
new Version(7, 0, 0, "", true)
));
// snapshot is not taken into account TODO inconsistent with equals
assertEquals(
0,
new Version(7, 0, 0, "", false).compareTo(
new Version(7, 0, 0, null, true))
);
// without suffix is smaller than with TODO
assertOrder(
new Version(7, 0, 0, null, false),
new Version(7, 0, 0, "-alpha1", false)
);
// numbered suffix
assertOrder(
new Version(7, 0, 0, "-alpha1", false),
new Version(7, 0, 0, "-alpha2", false)
);
// ranked suffix
assertOrder(
new Version(7, 0, 0, "-alpha8", false),
new Version(7, 0, 0, "-rc1", false)
);
// ranked suffix
assertOrder(
new Version(7, 0, 0, "-alpha8", false),
new Version(7, 0, 0, "-beta1", false)
);
// ranked suffix
assertOrder(
new Version(7, 0, 0, "-beta8", false),
new Version(7, 0, 0, "-rc1", false)
);
// major takes precedence
assertOrder(
new Version(6, 10, 10, "-alpha8", true),
new Version(7, 0, 0, "-alpha2", false)
);
// then minor
assertOrder(
new Version(7, 0, 10, "-alpha8", true),
new Version(7, 1, 0, "-alpha2", false)
);
// then revision
assertOrder(
new Version(7, 1, 0, "-alpha8", true),
new Version(7, 1, 10, "-alpha2", false)
);
}
public void testExceptionEmpty() {
expectedEx.expect(IllegalArgumentException.class);
expectedEx.expectMessage("Invalid version format");
Version.fromString("");
}
public void testExceptionSyntax() {
expectedEx.expect(IllegalArgumentException.class);
expectedEx.expectMessage("Invalid version format");
Version.fromString("foo.bar.baz");
}
public void testExceptionSuffixNumber() {
expectedEx.expect(IllegalArgumentException.class);
expectedEx.expectMessage("Invalid suffix");
new Version(7, 1, 1, "-alpha", true);
}
public void testExceptionSuffix() {
expectedEx.expect(IllegalArgumentException.class);
expectedEx.expectMessage("Suffix must contain one of:");
new Version(7, 1, 1, "foo1", true);
}
private void assertOrder(Version smaller, Version bigger) {
assertEquals(smaller + " should be smaller than " + bigger, -1, smaller.compareTo(bigger));
}
private void assertVersionEquals(String stringVersion, int major, int minor, int revision, String suffix, boolean snapshot) {
Version version = Version.fromString(stringVersion);
assertEquals(major, version.getMajor());
assertEquals(minor, version.getMinor());
assertEquals(revision, version.getRevision());
if (snapshot) {
assertTrue("Expected version to be a snapshot but it was not", version.isSnapshot());
} else {
assertFalse("Expected version not to be a snapshot but it was", version.isSnapshot());
}
assertEquals(suffix, version.getSuffix());
}
}


@ -106,6 +106,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
import org.elasticsearch.script.mustache.SearchTemplateRequest;
@ -1096,6 +1097,13 @@ final class RequestConverters {
return request;
}
static Request xpackUsage(XPackUsageRequest usageRequest) {
Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage");
Params parameters = new Params(request);
parameters.withMasterTimeout(usageRequest.masterNodeTimeout());
return request;
}
private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));


@ -85,8 +85,10 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBu
import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedAutoDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram;
import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;
@ -1004,6 +1006,7 @@ public class RestHighLevelClient implements Closeable {
map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c));
map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));


@ -176,7 +176,7 @@ public final class SnapshotClient {
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
* API on elastic.co</a>
*/
public CreateSnapshotResponse createSnapshot(CreateSnapshotRequest createSnapshotRequest, RequestOptions options)
public CreateSnapshotResponse create(CreateSnapshotRequest createSnapshotRequest, RequestOptions options)
throws IOException {
return restHighLevelClient.performRequestAndParseEntity(createSnapshotRequest, RequestConverters::createSnapshot, options,
CreateSnapshotResponse::fromXContent, emptySet());
@ -188,7 +188,7 @@ public final class SnapshotClient {
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
* API on elastic.co</a>
*/
public void createSnapshotAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options,
public void createAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options,
ActionListener<CreateSnapshotResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(createSnapshotRequest, RequestConverters::createSnapshot, options,
CreateSnapshotResponse::fromXContent, listener, emptySet());


@ -22,6 +22,8 @@ package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.protocol.xpack.XPackInfoRequest;
import org.elasticsearch.protocol.xpack.XPackInfoResponse;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.protocol.xpack.XPackUsageResponse;
import java.io.IOException;
@ -70,4 +72,25 @@ public final class XPackClient {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackInfo, options,
XPackInfoResponse::fromXContent, listener, emptySet());
}
/**
* Fetch usage information about X-Pack features from the cluster.
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public XPackUsageResponse usage(XPackUsageRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xpackUsage, options,
XPackUsageResponse::fromXContent, emptySet());
}
/**
* Asynchronously fetch usage information about X-Pack features from the cluster.
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
public void usageAsync(XPackUsageRequest request, RequestOptions options, ActionListener<XPackUsageResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xpackUsage, options,
XPackUsageResponse::fromXContent, listener, emptySet());
}
}


@ -61,8 +61,8 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
private CreateSnapshotResponse createTestSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException {
// assumes the repository already exists
return execute(createSnapshotRequest, highLevelClient().snapshot()::createSnapshot,
highLevelClient().snapshot()::createSnapshotAsync);
return execute(createSnapshotRequest, highLevelClient().snapshot()::create,
highLevelClient().snapshot()::createAsync);
}
public void testCreateRepository() throws IOException {


@ -35,12 +35,17 @@ import org.elasticsearch.protocol.xpack.XPackInfoResponse;
import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo;
import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo;
import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.protocol.xpack.XPackUsageResponse;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.is;
/**
* Documentation for miscellaneous APIs in the high level java client.
* Code wrapped in {@code tag} and {@code end} tags is included in the docs.
@ -129,6 +134,50 @@ public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase
}
}
public void testXPackUsage() throws Exception {
RestHighLevelClient client = highLevelClient();
{
//tag::x-pack-usage-execute
XPackUsageRequest request = new XPackUsageRequest();
XPackUsageResponse response = client.xpack().usage(request, RequestOptions.DEFAULT);
//end::x-pack-usage-execute
//tag::x-pack-usage-response
Map<String, Map<String, Object>> usages = response.getUsages();
Map<String, Object> monitoringUsage = usages.get("monitoring");
assertThat(monitoringUsage.get("available"), is(true));
assertThat(monitoringUsage.get("enabled"), is(true));
assertThat(monitoringUsage.get("collection_enabled"), is(false));
//end::x-pack-usage-response
}
{
XPackUsageRequest request = new XPackUsageRequest();
// tag::x-pack-usage-execute-listener
ActionListener<XPackUsageResponse> listener = new ActionListener<XPackUsageResponse>() {
@Override
public void onResponse(XPackUsageResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-usage-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-usage-execute-async
client.xpack().usageAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-usage-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testInitializationFromClientBuilder() throws IOException {
//tag::rest-high-level-client-init
RestHighLevelClient client = new RestHighLevelClient(


@ -295,6 +295,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
}
@SuppressWarnings({ "unused" })
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32029")
public void testSearchRequestAggregations() throws IOException {
RestHighLevelClient client = highLevelClient();
{
@ -829,21 +830,21 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
public void testMultiSearchTemplateWithInlineScript() throws Exception {
indexSearchTestData();
RestHighLevelClient client = highLevelClient();
// tag::multi-search-template-request-inline
String [] searchTerms = {"elasticsearch", "logstash", "kibana"};
MultiSearchTemplateRequest multiRequest = new MultiSearchTemplateRequest(); // <1>
for (String searchTerm : searchTerms) {
SearchTemplateRequest request = new SearchTemplateRequest(); // <2>
request.setRequest(new SearchRequest("posts"));
request.setScriptType(ScriptType.INLINE);
request.setScript(
"{" +
" \"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" } }," +
" \"size\" : \"{{size}}\"" +
@ -854,15 +855,15 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
scriptParams.put("value", searchTerm);
scriptParams.put("size", 5);
request.setScriptParams(scriptParams);
multiRequest.add(request); // <3>
}
// end::multi-search-template-request-inline
// tag::multi-search-template-request-sync
MultiSearchTemplateResponse multiResponse = client.multiSearchTemplate(multiRequest, RequestOptions.DEFAULT);
// end::multi-search-template-request-sync
// tag::multi-search-template-response
for (Item item : multiResponse.getResponses()) { // <1>
if (item.isFailure()) {
@ -882,7 +883,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(searchResponse.getHits().totalHits > 0);
}
public void testMultiSearchTemplateWithStoredScript() throws Exception {
indexSearchTestData();
RestHighLevelClient client = highLevelClient();
@ -892,16 +893,16 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::multi-search-template-request-stored
MultiSearchTemplateRequest multiRequest = new MultiSearchTemplateRequest();
String [] searchTerms = {"elasticsearch", "logstash", "kibana"};
for (String searchTerm : searchTerms) {
SearchTemplateRequest request = new SearchTemplateRequest();
request.setRequest(new SearchRequest("posts"));
request.setScriptType(ScriptType.STORED);
request.setScript("title_search");
Map<String, Object> params = new HashMap<>();
params.put("field", "title");
params.put("value", searchTerm);
@ -911,8 +912,8 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
}
// end::multi-search-template-request-stored
// tag::multi-search-template-execute
MultiSearchTemplateResponse multiResponse = client.multiSearchTemplate(multiRequest, RequestOptions.DEFAULT);
@ -966,7 +967,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
// end::register-script
assertEquals(RestStatus.OK.getStatus(), scriptResponse.getStatusLine().getStatusCode());
}
public void testExplain() throws Exception {
indexSearchTestData();


@ -425,7 +425,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::create-snapshot-request-waitForCompletion
// tag::create-snapshot-execute
CreateSnapshotResponse response = client.snapshot().createSnapshot(request, RequestOptions.DEFAULT);
CreateSnapshotResponse response = client.snapshot().create(request, RequestOptions.DEFAULT);
// end::create-snapshot-execute
// tag::create-snapshot-response
@ -433,6 +433,12 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::create-snapshot-response
assertEquals(RestStatus.OK, status);
// tag::create-snapshot-response-snapshot-info
SnapshotInfo snapshotInfo = response.getSnapshotInfo(); // <1>
// end::create-snapshot-response-snapshot-info
assertNotNull(snapshotInfo);
}
public void testSnapshotCreateAsync() throws InterruptedException {
@ -460,7 +466,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
listener = new LatchedActionListener<>(listener, latch);
// tag::create-snapshot-execute-async
client.snapshot().createSnapshotAsync(request, RequestOptions.DEFAULT, listener); // <1>
client.snapshot().createAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::create-snapshot-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));


@ -0,0 +1,54 @@
[[java-rest-high-x-pack-usage]]
=== X-Pack Usage API
[[java-rest-high-x-pack-usage-execution]]
==== Execution
Detailed information about the usage of features from {xpack} can be
retrieved using the `usage()` method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-execute]
--------------------------------------------------
[[java-rest-high-x-pack-info-response]]
==== Response
The returned `XPackUsageResponse` contains a `Map` keyed by feature name.
Every feature map has an `available` key, indicating whether that
feature is available given the current license, and an `enabled` key,
indicating whether that feature is currently enabled. Other keys
are specific to each feature.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-response]
--------------------------------------------------
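
For readers without the doc-test sources at hand, a minimal sketch of iterating the usage map (imports are omitted as in the surrounding snippets; `getUsages()` is the accessor used in the included doc-test code):

["source","java"]
--------------------------------------------------
Map<String, Map<String, Object>> usages = response.getUsages();
for (Map.Entry<String, Map<String, Object>> feature : usages.entrySet()) {
    Object available = feature.getValue().get("available");
    Object enabled = feature.getValue().get("enabled");
    System.out.println(feature.getKey() + ": available=" + available + ", enabled=" + enabled);
}
--------------------------------------------------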
[[java-rest-high-x-pack-usage-async]]
==== Asynchronous Execution
This request can be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-execute-async]
--------------------------------------------------
<1> The call to execute the usage API and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `XPackUsageResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument


@ -73,11 +73,22 @@ include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-snapshot-r
[[java-rest-high-snapshot-create-snapshot-sync]]
==== Synchronous Execution
Execute a `CreateSnapshotRequest` synchronously to receive a `CreateSnapshotResponse`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-snapshot-execute]
--------------------------------------------------
Retrieve the `SnapshotInfo` from a `CreateSnapshotResponse` when the snapshot is fully created
(that is, when the `waitForCompletion` parameter is set to `true`).
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-snapshot-response-snapshot-info]
--------------------------------------------------
<1> The `SnapshotInfo` object.
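
As a hedged sketch (not part of the included doc test), a few fields one might read off the returned object, assuming the usual `SnapshotInfo` accessors `snapshotId()`, `state()` and `successfulShards()`; imports are omitted as in the surrounding snippets:

["source","java"]
--------------------------------------------------
SnapshotInfo snapshotInfo = response.getSnapshotInfo();
String snapshotName = snapshotInfo.snapshotId().getName();
SnapshotState state = snapshotInfo.state();             // SUCCESS once the snapshot has completed
int successfulShards = snapshotInfo.successfulShards();
--------------------------------------------------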
[[java-rest-high-snapshot-create-snapshot-async]]
==== Asynchronous Execution


@ -19,6 +19,8 @@ the limit will fail with an exception.
include::bucket/adjacency-matrix-aggregation.asciidoc[]
include::bucket/autodatehistogram-aggregation.asciidoc[]
include::bucket/children-aggregation.asciidoc[]
include::bucket/composite-aggregation.asciidoc[]


@ -0,0 +1,283 @@
[[search-aggregations-bucket-autodatehistogram-aggregation]]
=== Auto-interval Date Histogram Aggregation
A multi-bucket aggregation similar to the <<search-aggregations-bucket-datehistogram-aggregation>> except that,
instead of providing an interval to use as the width of each bucket, a target number of buckets is provided.
The interval of the buckets is then automatically chosen to best achieve that target, and the number of
buckets returned will always be less than or equal to the target.
The `buckets` field is optional, and defaults to 10 buckets if not specified.
Requesting a target of 10 buckets.
[source,js]
--------------------------------------------------
POST /sales/_search?size=0
{
"aggs" : {
"sales_over_time" : {
"auto_date_histogram" : {
"field" : "date",
"buckets" : 10
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
==== Keys
Internally, a date is represented as a 64 bit number representing a timestamp
in milliseconds-since-the-epoch. These timestamps are returned as the bucket
++key++s. The `key_as_string` is the same timestamp converted to a formatted
date string using the format specified with the `format` parameter:
TIP: If no `format` is specified, then it will use the first date
<<mapping-date-format,format>> specified in the field mapping.
[source,js]
--------------------------------------------------
POST /sales/_search?size=0
{
"aggs" : {
"sales_over_time" : {
"auto_date_histogram" : {
"field" : "date",
"buckets" : 5,
"format" : "yyyy-MM-dd" <1>
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
<1> Supports expressive date <<date-format-pattern,format pattern>>
Response:
[source,js]
--------------------------------------------------
{
...
"aggregations": {
"sales_over_time": {
"buckets": [
{
"key_as_string": "2015-01-01",
"key": 1420070400000,
"doc_count": 3
},
{
"key_as_string": "2015-02-01",
"key": 1422748800000,
"doc_count": 2
},
{
"key_as_string": "2015-03-01",
"key": 1425168000000,
"doc_count": 2
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
==== Intervals
The interval of the returned buckets is selected based on the data collected by the
aggregation so that the number of buckets returned is less than or equal to the number
requested. The possible intervals returned are:
[horizontal]
seconds:: In multiples of 1, 5, 10 and 30
minutes:: In multiples of 1, 5, 10 and 30
hours:: In multiples of 1, 3 and 12
days:: In multiples of 1, and 7
months:: In multiples of 1, and 3
years:: In multiples of 1, 5, 10, 20, 50 and 100
In the worst case, where the number of daily buckets is only slightly too many for the requested
number of buckets, the next available interval is seven days, so the number of buckets
returned will be roughly 1/7th of the number requested.
==== Time Zone
Date-times are stored in Elasticsearch in UTC. By default, all bucketing and
rounding is also done in UTC. The `time_zone` parameter can be used to indicate
that bucketing should use a different time zone.
Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or
`-08:00`) or as a timezone id, an identifier used in the TZ database like
`America/Los_Angeles`.
Consider the following example:
[source,js]
---------------------------------
PUT my_index/log/1?refresh
{
"date": "2015-10-01T00:30:00Z"
}
PUT my_index/log/2?refresh
{
"date": "2015-10-01T01:30:00Z"
}
PUT my_index/log/3?refresh
{
"date": "2015-10-01T02:30:00Z"
}
GET my_index/_search?size=0
{
"aggs": {
"by_day": {
"auto_date_histogram": {
"field": "date",
"buckets" : 3
}
}
}
}
---------------------------------
// CONSOLE
UTC is used if no time zone is specified, so three 1-hour buckets are returned,
starting at midnight UTC on 1 October 2015:
[source,js]
---------------------------------
{
...
"aggregations": {
"by_day": {
"buckets": [
{
"key_as_string": "2015-10-01T00:00:00.000Z",
"key": 1443657600000,
"doc_count": 1
},
{
"key_as_string": "2015-10-01T01:00:00.000Z",
"key": 1443661200000,
"doc_count": 1
},
{
"key_as_string": "2015-10-01T02:00:00.000Z",
"key": 1443664800000,
"doc_count": 1
}
]
}
}
}
---------------------------------
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before
midnight UTC:
[source,js]
---------------------------------
GET my_index/_search?size=0
{
"aggs": {
"by_day": {
"auto_date_histogram": {
"field": "date",
"buckets" : 3,
"time_zone": "-01:00"
}
}
}
}
---------------------------------
// CONSOLE
// TEST[continued]
Now three 1-hour buckets are still returned but the first bucket starts at
11:00pm on 30 September 2015 since that is the local time for the bucket in
the specified time zone.
[source,js]
---------------------------------
{
...
"aggregations": {
"by_day": {
"buckets": [
{
"key_as_string": "2015-09-30T23:00:00.000-01:00", <1>
"key": 1443657600000,
"doc_count": 1
},
{
"key_as_string": "2015-10-01T00:00:00.000-01:00",
"key": 1443661200000,
"doc_count": 1
},
{
"key_as_string": "2015-10-01T01:00:00.000-01:00",
"key": 1443664800000,
"doc_count": 1
}
]
}
}
}
---------------------------------
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
<1> The `key_as_string` value represents midnight on each day
in the specified time zone.
WARNING: When using time zones that follow DST (daylight saving time) changes,
buckets close to the moment when those changes happen can have slightly different
sizes than neighbouring buckets.
For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am,
clocks were turned forward 1 hour to 3am local time. If the result of the aggregation
was daily buckets, the bucket covering that day will only hold data for 23 hours
instead of the usual 24 hours for other buckets. The same is true for shorter intervals,
such as 12h: there will be only an 11h bucket on the morning of 27 March when the
DST shift happens.
==== Scripts
As with the normal <<search-aggregations-bucket-datehistogram-aggregation, `date_histogram`>>, both document-level
scripts and value-level scripts are supported. This aggregation does not, however, support the `min_doc_count`,
`extended_bounds` and `order` parameters.
==== Missing value
The `missing` parameter defines how documents that are missing a value should be treated.
By default they will be ignored but it is also possible to treat them as if they
had a value.
[source,js]
--------------------------------------------------
POST /sales/_search?size=0
{
"aggs" : {
"sale_date" : {
"auto_date_histogram" : {
"field" : "date",
"buckets": 10,
"missing": "2000/01/01" <1>
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
<1> Documents without a value in the `date` field will fall into the same bucket as documents that have the value `2000-01-01`.


@ -104,10 +104,13 @@ With that out of the way, let's get started with the fun part...
== Installation
[TIP]
==============
You can skip installation completely by using our hosted
Elasticsearch Service on https://www.elastic.co/cloud[Elastic Cloud], which is
available on AWS and GCP. You can
https://www.elastic.co/cloud/elasticsearch-service/signup[try out the hosted service] for free.
==============
Elasticsearch requires at least Java 8. Specifically, as of this writing, it is recommended that you use the Oracle JDK version {jdk}. Java installation varies from platform to platform, so we won't go into those details here; Oracle's recommended installation documentation can be found on http://docs.oracle.com/javase/8/docs/technotes/guides/install/install_overview.html[Oracle's website]. Suffice it to say, before you install Elasticsearch, please check your Java version first by running (and then install/upgrade accordingly if needed):


@ -69,4 +69,12 @@ The following previously deprecated url parameter have been removed:
Previously the in flight requests circuit breaker considered only the raw byte representation.
By bumping the value of `network.breaker.inflight_requests.overhead` from 1 to 2, this circuit
breaker now also considers the memory overhead of representing the request as a structured object.
==== Parent circuit breaker changes
The parent circuit breaker defines a new setting `indices.breaker.total.use_real_memory` which is
`true` by default. This means that the parent circuit breaker will trip based on currently used
heap memory instead of only considering the reserved memory by child circuit breakers. When this
setting is `true`, the default parent breaker limit also changes from 70% to 95% of the JVM heap size.
The previous behavior can be restored by setting `indices.breaker.total.use_real_memory` to `false`.
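
For illustration only, a hedged sketch of expressing the opt-out programmatically, e.g. when building node settings in a test (assuming `org.elasticsearch.common.settings.Settings`); in a normal deployment the setting belongs in `elasticsearch.yml`:

["source","java"]
--------------------------------------------------
Settings nodeSettings = Settings.builder()
        .put("indices.breaker.total.use_real_memory", false) // restore the previous behavior
        .build();
--------------------------------------------------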


@ -18,3 +18,8 @@ See {plugins}/repository-azure-repository-settings.html#repository-azure-reposit
must now be specified in the client settings instead.
See {plugins}/repository-gcs-client.html#repository-gcs-client[Google Cloud Storage Client Settings].
==== Analysis Plugin changes
* The misspelled helper method `requriesAnalysisSettings(AnalyzerProvider<T> provider)` has been
renamed to `requiresAnalysisSettings`


@ -13,11 +13,18 @@ These settings can be dynamically updated on a live cluster with the
[float]
==== Parent circuit breaker
The parent-level breaker can be configured with the following setting:
The parent-level breaker can be configured with the following settings:
`indices.breaker.total.use_real_memory`::
Whether the parent breaker should take real memory usage into account (`true`) or only
consider the amount that is reserved by child circuit breakers (`false`). Defaults to `true`.
`indices.breaker.total.limit`::
Starting limit for overall parent breaker, defaults to 70% of JVM heap.
Starting limit for overall parent breaker, defaults to 70% of JVM heap if
`indices.breaker.total.use_real_memory` is `false`. If `indices.breaker.total.use_real_memory`
is `true`, defaults to 95% of the JVM heap.
[[fielddata-circuit-breaker]]
[float]


@ -116,3 +116,105 @@ The default is based on the number of data nodes and the default search thread p
WARNING: `collapse` cannot be used in conjunction with <<search-request-scroll, scroll>>,
<<search-request-rescore, rescore>> or <<search-request-search-after, search after>>.
==== Second level of collapsing
Second level of collapsing is also supported and is applied to `inner_hits`.
For example, the following request finds the top scored tweets for
each country, and within each country finds the top scored tweets
for each user.
[source,js]
--------------------------------------------------
GET /twitter/_search
{
"query": {
"match": {
"message": "elasticsearch"
}
},
"collapse" : {
"field" : "country",
"inner_hits" : {
"name": "by_location",
"collapse" : {"field" : "user"},
"size": 3
}
}
}
--------------------------------------------------
// NOTCONSOLE
Response:
[source,js]
--------------------------------------------------
{
...
"hits": [
{
"_index": "twitter",
"_type": "_doc",
"_id": "9",
"_score": ...,
"_source": {...},
"fields": {"country": ["UK"]},
"inner_hits":{
"by_location": {
"hits": {
...,
"hits": [
{
...
"fields": {"user" : ["user124"]}
},
{
...
"fields": {"user" : ["user589"]}
},
{
...
"fields": {"user" : ["user001"]}
}
]
}
}
}
},
{
"_index": "twitter",
"_type": "_doc",
"_id": "1",
"_score": ..,
"_source": {...},
"fields": {"country": ["Canada"]},
"inner_hits":{
"by_location": {
"hits": {
...,
"hits": [
{
...
"fields": {"user" : ["user444"]}
},
{
...
"fields": {"user" : ["user1111"]}
},
{
...
"fields": {"user" : ["user999"]}
}
]
}
}
}
},
....
]
}
--------------------------------------------------
// NOTCONSOLE
NOTE: Second level of collapsing doesn't allow `inner_hits`.

View File

@ -1,11 +1,16 @@
[[install-elasticsearch]]
== Installing Elasticsearch
[float]
=== Hosted Elasticsearch
Elasticsearch can be run on your own hardware or using our hosted
Elasticsearch Service on https://www.elastic.co/cloud[Elastic Cloud], which is
available on AWS and GCP. You can
https://www.elastic.co/cloud/elasticsearch-service/signup[try out the hosted service] for free.
[float]
=== Installing Elasticsearch Yourself
Elasticsearch is provided in the following package formats:
[horizontal]


@ -135,7 +135,7 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import static org.elasticsearch.plugins.AnalysisPlugin.requriesAnalysisSettings;
import static org.elasticsearch.plugins.AnalysisPlugin.requiresAnalysisSettings;
public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
@ -201,11 +201,11 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
filters.put("cjk_width", CJKWidthFilterFactory::new);
filters.put("classic", ClassicFilterFactory::new);
filters.put("czech_stem", CzechStemTokenFilterFactory::new);
filters.put("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new));
filters.put("common_grams", requiresAnalysisSettings(CommonGramsTokenFilterFactory::new));
filters.put("decimal_digit", DecimalDigitFilterFactory::new);
filters.put("delimited_payload_filter", LegacyDelimitedPayloadTokenFilterFactory::new);
filters.put("delimited_payload", DelimitedPayloadTokenFilterFactory::new);
filters.put("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
filters.put("dictionary_decompounder", requiresAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
filters.put("dutch_stem", DutchStemTokenFilterFactory::new);
filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new);
filters.put("edgeNGram", EdgeNGramTokenFilterFactory::new);
@ -216,11 +216,11 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
filters.put("german_normalization", GermanNormalizationFilterFactory::new);
filters.put("german_stem", GermanStemTokenFilterFactory::new);
filters.put("hindi_normalization", HindiNormalizationFilterFactory::new);
filters.put("hyphenation_decompounder", requriesAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new));
filters.put("hyphenation_decompounder", requiresAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new));
filters.put("indic_normalization", IndicNormalizationFilterFactory::new);
filters.put("keep", requriesAnalysisSettings(KeepWordFilterFactory::new));
filters.put("keep_types", requriesAnalysisSettings(KeepTypesFilterFactory::new));
filters.put("keyword_marker", requriesAnalysisSettings(KeywordMarkerTokenFilterFactory::new));
filters.put("keep", requiresAnalysisSettings(KeepWordFilterFactory::new));
filters.put("keep_types", requiresAnalysisSettings(KeepTypesFilterFactory::new));
filters.put("keyword_marker", requiresAnalysisSettings(KeywordMarkerTokenFilterFactory::new));
filters.put("kstem", KStemTokenFilterFactory::new);
filters.put("length", LengthTokenFilterFactory::new);
filters.put("limit", LimitTokenCountFilterFactory::new);
@ -229,8 +229,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
filters.put("multiplexer", MultiplexerTokenFilterFactory::new);
filters.put("ngram", NGramTokenFilterFactory::new);
filters.put("nGram", NGramTokenFilterFactory::new);
filters.put("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));
filters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceTokenFilterFactory::new));
filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));
filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new));
filters.put("persian_normalization", PersianNormalizationFilterFactory::new);
filters.put("porter_stem", PorterStemTokenFilterFactory::new);
filters.put("remove_duplicates", RemoveDuplicatesTokenFilterFactory::new);
@ -241,10 +241,10 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
filters.put("serbian_normalization", SerbianNormalizationFilterFactory::new);
filters.put("snowball", SnowballTokenFilterFactory::new);
filters.put("sorani_normalization", SoraniNormalizationFilterFactory::new);
filters.put("stemmer_override", requriesAnalysisSettings(StemmerOverrideTokenFilterFactory::new));
filters.put("stemmer_override", requiresAnalysisSettings(StemmerOverrideTokenFilterFactory::new));
filters.put("stemmer", StemmerTokenFilterFactory::new);
filters.put("trim", TrimTokenFilterFactory::new);
filters.put("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
filters.put("truncate", requiresAnalysisSettings(TruncateTokenFilterFactory::new));
filters.put("unique", UniqueTokenFilterFactory::new);
filters.put("uppercase", UpperCaseTokenFilterFactory::new);
filters.put("word_delimiter_graph", WordDelimiterGraphTokenFilterFactory::new);
@ -256,8 +256,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
Map<String, AnalysisProvider<CharFilterFactory>> filters = new TreeMap<>();
filters.put("html_strip", HtmlStripCharFilterFactory::new);
filters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
filters.put("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceCharFilterFactory::new));
filters.put("mapping", requiresAnalysisSettings(MappingCharFilterFactory::new));
return filters;
}

View File

@ -69,7 +69,7 @@ public class ExpectedReciprocalRankTests extends ESTestCase {
* 4 | 1 | 0.03125 | 0.078125 | 0.00244140625 |
* }</pre>
*
* err => sum of last column
* err = sum of last column
*/
public void testERRAt() {
List<RatedDocument> rated = new ArrayList<>();
@ -94,7 +94,7 @@ public class ExpectedReciprocalRankTests extends ESTestCase {
* 4 | 1 | 0.03125 | 0.125 | 0.00390625 |
* }</pre>
*
* err => sum of last column
* err = sum of last column
*/
public void testERRMissingRatings() {
List<RatedDocument> rated = new ArrayList<>();

View File

@ -20,6 +20,7 @@
package org.elasticsearch.repositories.url;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.url.URLBlobStore;
@ -31,7 +32,6 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
@ -71,33 +71,44 @@ public class URLRepository extends BlobStoreRepository {
private final Environment environment;
private final URLBlobStore blobStore;
private final BlobPath basePath;
private final URL url;
/**
* Constructs a read-only URL-based repository
*/
public URLRepository(RepositoryMetaData metadata, Environment environment,
NamedXContentRegistry namedXContentRegistry) throws IOException {
NamedXContentRegistry namedXContentRegistry) {
super(metadata, environment.settings(), namedXContentRegistry);
if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) {
throw new RepositoryException(metadata.name(), "missing url");
}
this.environment = environment;
supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings);
urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{});
this.environment = environment;
URL url = URL_SETTING.exists(metadata.settings()) ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings);
URL normalizedURL = checkURL(url);
blobStore = new URLBlobStore(settings, normalizedURL);
basePath = BlobPath.cleanPath();
url = URL_SETTING.exists(metadata.settings())
? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings);
}
@Override
protected BlobStore blobStore() {
return blobStore;
protected BlobStore createBlobStore() {
URL normalizedURL = checkURL(url);
return new URLBlobStore(settings, normalizedURL);
}
// only use for testing
@Override
protected BlobContainer blobContainer() {
return super.blobContainer();
}
// only use for testing
@Override
protected BlobStore getBlobStore() {
return super.getBlobStore();
}
@Override

View File

@ -31,8 +31,22 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.nullValue;
public class URLRepositoryTests extends ESTestCase {
private URLRepository createRepository(Settings baseSettings, RepositoryMetaData repositoryMetaData) {
return new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings),
new NamedXContentRegistry(Collections.emptyList())) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo manually on test/main threads
}
};
}
public void testWhiteListingRepoURL() throws IOException {
String repoPath = createTempDir().resolve("repository").toUri().toURL().toString();
Settings baseSettings = Settings.builder()
@ -41,8 +55,12 @@ public class URLRepositoryTests extends ESTestCase {
.put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath)
.build();
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings);
new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings),
new NamedXContentRegistry(Collections.emptyList()));
final URLRepository repository = createRepository(baseSettings, repositoryMetaData);
repository.start();
assertThat("blob store has to be lazy initialized", repository.getBlobStore(), is(nullValue()));
repository.blobContainer();
assertThat("blobContainer has to initialize blob store", repository.getBlobStore(), not(nullValue()));
}
public void testIfNotWhiteListedMustSetRepoURL() throws IOException {
@ -52,9 +70,10 @@ public class URLRepositoryTests extends ESTestCase {
.put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath)
.build();
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings);
final URLRepository repository = createRepository(baseSettings, repositoryMetaData);
repository.start();
try {
new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings),
new NamedXContentRegistry(Collections.emptyList()));
repository.blobContainer();
fail("RepositoryException should have been thrown.");
} catch (RepositoryException e) {
String msg = "[url] file url [" + repoPath
@ -73,13 +92,33 @@ public class URLRepositoryTests extends ESTestCase {
.put(URLRepository.SUPPORTED_PROTOCOLS_SETTING.getKey(), "http,https")
.build();
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings);
final URLRepository repository = createRepository(baseSettings, repositoryMetaData);
repository.start();
try {
new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings),
new NamedXContentRegistry(Collections.emptyList()));
repository.blobContainer();
fail("RepositoryException should have been thrown.");
} catch (RepositoryException e) {
assertEquals("[url] unsupported url protocol [file] from URL [" + repoPath +"]", e.getMessage());
}
}
public void testNonNormalizedUrl() throws IOException {
Settings baseSettings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put(URLRepository.ALLOWED_URLS_SETTING.getKey(), "file:/tmp/")
.put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), "file:/var/" )
.build();
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings);
final URLRepository repository = createRepository(baseSettings, repositoryMetaData);
repository.start();
try {
repository.blobContainer();
fail("RepositoryException should have been thrown.");
} catch (RepositoryException e) {
assertEquals("[url] file url [file:/var/] doesn't match any of the locations "
+ "specified by path.repo or repositories.url.allowed_urls",
e.getMessage());
}
}
}

View File

@ -38,7 +38,6 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import org.elasticsearch.snapshots.SnapshotCreationException;
import org.elasticsearch.snapshots.SnapshotId;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Locale;
@ -78,25 +77,21 @@ public class AzureRepository extends BlobStoreRepository {
public static final Setting<Boolean> READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope);
}
private final AzureBlobStore blobStore;
private final BlobPath basePath;
private final ByteSizeValue chunkSize;
private final boolean compress;
private final Environment environment;
private final AzureStorageService storageService;
private final boolean readonly;
public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry,
AzureStorageService storageService) throws IOException, URISyntaxException, StorageException {
AzureStorageService storageService) {
super(metadata, environment.settings(), namedXContentRegistry);
this.blobStore = new AzureBlobStore(metadata, environment.settings(), storageService);
this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings());
this.compress = Repository.COMPRESS_SETTING.get(metadata.settings());
// If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting.
// For secondary_only setting, the repository should be read only
if (Repository.READONLY_SETTING.exists(metadata.settings())) {
this.readonly = Repository.READONLY_SETTING.get(metadata.settings());
} else {
this.readonly = this.blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY;
}
this.environment = environment;
this.storageService = storageService;
final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/');
if (Strings.hasLength(basePath)) {
// Remove starting / if any
@ -108,15 +103,33 @@ public class AzureRepository extends BlobStoreRepository {
} else {
this.basePath = BlobPath.cleanPath();
}
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, compress, basePath));
// If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting.
// For secondary_only setting, the repository should be read only
final LocationMode locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings());
if (Repository.READONLY_SETTING.exists(metadata.settings())) {
this.readonly = Repository.READONLY_SETTING.get(metadata.settings());
} else {
this.readonly = locationMode == LocationMode.SECONDARY_ONLY;
}
}
// only use for testing
@Override
protected BlobStore getBlobStore() {
return super.getBlobStore();
}
/**
* {@inheritDoc}
*/
@Override
protected BlobStore blobStore() {
protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException {
final AzureBlobStore blobStore = new AzureBlobStore(metadata, environment.settings(), storageService);
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"using container [{}], chunk_size [{}], compress [{}], base_path [{}]",
blobStore, chunkSize, compress, basePath));
return blobStore;
}
@ -144,6 +157,7 @@ public class AzureRepository extends BlobStoreRepository {
@Override
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData clusterMetadata) {
try {
final AzureBlobStore blobStore = (AzureBlobStore) blobStore();
if (blobStore.containerExist() == false) {
throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before "
+ " creating an azure snapshot repository backed by it.");

View File

@ -20,7 +20,6 @@
package org.elasticsearch.repositories.azure;
import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.StorageException;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
@ -30,76 +29,76 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.net.URISyntaxException;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Mockito.mock;
public class AzureRepositorySettingsTests extends ESTestCase {
private AzureRepository azureRepository(Settings settings) throws StorageException, IOException, URISyntaxException {
private AzureRepository azureRepository(Settings settings) {
Settings internalSettings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
.putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths())
.put(settings)
.build();
return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings),
final AzureRepository azureRepository = new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings),
TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class));
assertThat(azureRepository.getBlobStore(), is(nullValue()));
return azureRepository;
}
public void testReadonlyDefault() throws StorageException, IOException, URISyntaxException {
public void testReadonlyDefault() {
assertThat(azureRepository(Settings.EMPTY).isReadOnly(), is(false));
}
public void testReadonlyDefaultAndReadonlyOn() throws StorageException, IOException, URISyntaxException {
public void testReadonlyDefaultAndReadonlyOn() {
assertThat(azureRepository(Settings.builder()
.put("readonly", true)
.build()).isReadOnly(), is(true));
}
public void testReadonlyWithPrimaryOnly() throws StorageException, IOException, URISyntaxException {
public void testReadonlyWithPrimaryOnly() {
assertThat(azureRepository(Settings.builder()
.put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name())
.build()).isReadOnly(), is(false));
}
public void testReadonlyWithPrimaryOnlyAndReadonlyOn() throws StorageException, IOException, URISyntaxException {
public void testReadonlyWithPrimaryOnlyAndReadonlyOn() {
assertThat(azureRepository(Settings.builder()
.put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name())
.put("readonly", true)
.build()).isReadOnly(), is(true));
}
public void testReadonlyWithSecondaryOnlyAndReadonlyOn() throws StorageException, IOException, URISyntaxException {
public void testReadonlyWithSecondaryOnlyAndReadonlyOn() {
assertThat(azureRepository(Settings.builder()
.put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name())
.put("readonly", true)
.build()).isReadOnly(), is(true));
}
public void testReadonlyWithSecondaryOnlyAndReadonlyOff() throws StorageException, IOException, URISyntaxException {
public void testReadonlyWithSecondaryOnlyAndReadonlyOff() {
assertThat(azureRepository(Settings.builder()
.put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name())
.put("readonly", false)
.build()).isReadOnly(), is(false));
}
public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOn() throws StorageException, IOException, URISyntaxException {
public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOn() {
assertThat(azureRepository(Settings.builder()
.put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name())
.put("readonly", true)
.build()).isReadOnly(), is(true));
}
public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOff() throws StorageException, IOException, URISyntaxException {
public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOff() {
assertThat(azureRepository(Settings.builder()
.put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name())
.put("readonly", false)
.build()).isReadOnly(), is(false));
}
public void testChunkSize() throws StorageException, IOException, URISyntaxException {
public void testChunkSize() {
// default chunk size
AzureRepository azureRepository = azureRepository(Settings.EMPTY);
assertEquals(AzureStorageService.MAX_CHUNK_SIZE, azureRepository.chunkSize());

View File

@ -22,7 +22,6 @@ package org.elasticsearch.repositories.gcs;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -56,18 +55,19 @@ class GoogleCloudStorageRepository extends BlobStoreRepository {
byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic);
static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity());
private final ByteSizeValue chunkSize;
private final boolean compress;
private final GoogleCloudStorageService storageService;
private final BlobPath basePath;
private final GoogleCloudStorageBlobStore blobStore;
private final boolean compress;
private final ByteSizeValue chunkSize;
private final String bucket;
private final String clientName;
GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment,
NamedXContentRegistry namedXContentRegistry,
GoogleCloudStorageService storageService) throws Exception {
GoogleCloudStorageService storageService) {
super(metadata, environment.settings(), namedXContentRegistry);
this.storageService = storageService;
String bucket = getSetting(BUCKET, metadata);
String clientName = CLIENT_NAME.get(metadata.settings());
String basePath = BASE_PATH.get(metadata.settings());
if (Strings.hasLength(basePath)) {
BlobPath path = new BlobPath();
@ -81,16 +81,14 @@ class GoogleCloudStorageRepository extends BlobStoreRepository {
this.compress = getSetting(COMPRESS, metadata);
this.chunkSize = getSetting(CHUNK_SIZE, metadata);
this.bucket = getSetting(BUCKET, metadata);
this.clientName = CLIENT_NAME.get(metadata.settings());
logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress);
this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService);
}
@Override
protected BlobStore blobStore() {
return blobStore;
protected GoogleCloudStorageBlobStore createBlobStore() {
return new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService);
}
@Override

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
import org.junit.AfterClass;
@ -34,6 +35,7 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.instanceOf;
public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase {
@ -49,9 +51,10 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos
}
@Override
protected void createTestRepository(String name) {
protected void createTestRepository(String name, boolean verify) {
assertAcked(client().admin().cluster().preparePutRepository(name)
.setType(GoogleCloudStorageRepository.TYPE)
.setVerify(verify)
.setSettings(Settings.builder()
.put("bucket", BUCKET)
.put("base_path", GoogleCloudStorageBlobStoreRepositoryTests.class.getSimpleName())
@ -59,6 +62,11 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
}
@Override
protected void afterCreationCheck(Repository repository) {
assertThat(repository, instanceOf(GoogleCloudStorageRepository.class));
}
@AfterClass
public static void wipeRepository() {
blobs.clear();

View File

@ -42,7 +42,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
@ -61,29 +60,26 @@ public final class HdfsRepository extends BlobStoreRepository {
private final ByteSizeValue chunkSize;
private final boolean compress;
private final BlobPath basePath = BlobPath.cleanPath();
private HdfsBlobStore blobStore;
private final URI uri;
private final String pathSetting;
// buffer size passed to HDFS read/write methods
// TODO: why 100KB?
private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(100, ByteSizeUnit.KB);
public HdfsRepository(RepositoryMetaData metadata, Environment environment,
NamedXContentRegistry namedXContentRegistry) throws IOException {
NamedXContentRegistry namedXContentRegistry) {
super(metadata, environment.settings(), namedXContentRegistry);
this.environment = environment;
this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null);
this.compress = metadata.settings().getAsBoolean("compress", false);
}
@Override
protected void doStart() {
String uriSetting = getMetadata().settings().get("uri");
if (Strings.hasText(uriSetting) == false) {
throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore");
}
URI uri = URI.create(uriSetting);
uri = URI.create(uriSetting);
if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", uri.getScheme(), uriSetting));
@ -93,16 +89,11 @@ public final class HdfsRepository extends BlobStoreRepository {
"Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore", uri.getPath(), uriSetting));
}
String pathSetting = getMetadata().settings().get("path");
pathSetting = getMetadata().settings().get("path");
// get configuration
if (pathSetting == null) {
throw new IllegalArgumentException("No 'path' defined for hdfs snapshot/restore");
}
// initialize our blobstore using elevated privileges.
SpecialPermission.check();
blobStore = AccessController.doPrivileged((PrivilegedAction<HdfsBlobStore>) () -> createBlobstore(uri, pathSetting, getMetadata().settings()));
super.doStart();
}
private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositorySettings) {
@ -229,7 +220,12 @@ public final class HdfsRepository extends BlobStoreRepository {
}
@Override
protected BlobStore blobStore() {
protected HdfsBlobStore createBlobStore() {
// initialize our blobstore using elevated privileges.
SpecialPermission.check();
final HdfsBlobStore blobStore =
AccessController.doPrivileged((PrivilegedAction<HdfsBlobStore>)
() -> createBlobstore(uri, pathSetting, getMetadata().settings()));
return blobStore;
}

View File

@ -35,7 +35,6 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import java.io.IOException;
import java.util.Map;
import java.util.function.Function;
@ -144,30 +143,43 @@ class S3Repository extends BlobStoreRepository {
*/
static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");
private final S3BlobStore blobStore;
private final S3Service service;
private final BlobPath basePath;
private final String bucket;
private final ByteSizeValue bufferSize;
private final ByteSizeValue chunkSize;
private final boolean compress;
private final BlobPath basePath;
private final boolean serverSideEncryption;
private final String storageClass;
private final String cannedACL;
private final String clientName;
/**
* Constructs an s3 backed repository
*/
S3Repository(final RepositoryMetaData metadata,
final Settings settings,
final NamedXContentRegistry namedXContentRegistry,
final S3Service service) throws IOException {
final S3Service service) {
super(metadata, settings, namedXContentRegistry);
this.service = service;
final String bucket = BUCKET_SETTING.get(metadata.settings());
// Parse and validate the user's S3 Storage Class setting
this.bucket = BUCKET_SETTING.get(metadata.settings());
if (bucket == null) {
throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
}
final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
this.compress = COMPRESS_SETTING.get(metadata.settings());
@ -177,33 +189,44 @@ class S3Repository extends BlobStoreRepository {
") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
}
// Parse and validate the user's S3 Storage Class setting
final String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
final String cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
final String clientName = CLIENT_NAME.get(metadata.settings());
logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass);
// deprecated behavior: override client credentials from the cluster state
// (repository settings)
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
overrideCredentialsFromClusterState(service);
}
blobStore = new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
final String basePath = BASE_PATH_SETTING.get(metadata.settings());
if (Strings.hasLength(basePath)) {
this.basePath = new BlobPath().add(basePath);
} else {
this.basePath = BlobPath.cleanPath();
}
this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
this.clientName = CLIENT_NAME.get(metadata.settings());
logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass);
// (repository settings)
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
overrideCredentialsFromClusterState(service);
}
}
@Override
protected S3BlobStore createBlobStore() {
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
}
// only use for testing
@Override
protected BlobStore blobStore() {
return blobStore;
return super.blobStore();
}
// only use for testing
@Override
protected BlobStore getBlobStore() {
return super.getBlobStore();
}
@Override

View File

@ -61,7 +61,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo
});
}
private final S3Service service;
protected final S3Service service;
public S3RepositoryPlugin(final Settings settings) {
this(settings, new S3Service(settings));
@ -77,7 +77,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo
// proxy method for testing
protected S3Repository createRepository(final RepositoryMetaData metadata,
final Settings settings,
final NamedXContentRegistry registry) throws IOException {
final NamedXContentRegistry registry) {
return new S3Repository(metadata, settings, registry, service);
}

View File

@ -80,6 +80,16 @@ public class RepositoryCredentialsTests extends ESTestCase {
ProxyS3RepositoryPlugin(Settings settings) {
super(settings, new ProxyS3Service(settings));
}
@Override
protected S3Repository createRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry registry) {
return new S3Repository(metadata, settings, registry, service){
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo manually on test/main threads
}
};
}
}
public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException {
@ -102,8 +112,8 @@ public class RepositoryCredentialsTests extends ESTestCase {
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
S3Repository s3repo = createAndStartRepository(metadata, s3Plugin);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
@ -125,8 +135,8 @@ public class RepositoryCredentialsTests extends ESTestCase {
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret")
.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY);
S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
S3Repository s3repo = createAndStartRepository(metadata, s3Plugin);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
@ -140,6 +150,12 @@ public class RepositoryCredentialsTests extends ESTestCase {
+ " See the breaking changes documentation for the next major version.");
}
private S3Repository createAndStartRepository(RepositoryMetaData metadata, S3RepositoryPlugin s3Plugin) {
final S3Repository repository = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
repository.start();
return repository;
}
public void testReinitSecureCredentials() throws IOException {
final String clientName = randomFrom("default", "some_client");
// initial client node settings
@ -156,7 +172,7 @@ public class RepositoryCredentialsTests extends ESTestCase {
}
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) {
S3Repository s3repo = createAndStartRepository(metadata, s3Plugin)) {
try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials
.getCredentials();

View File

@ -51,6 +51,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.mockito.Mockito.mock;
@ -84,8 +85,11 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
}
@Override
protected void createTestRepository(final String name) {
assertAcked(client().admin().cluster().preparePutRepository(name).setType(S3Repository.TYPE).setSettings(Settings.builder()
protected void createTestRepository(final String name, boolean verify) {
assertAcked(client().admin().cluster().preparePutRepository(name)
.setType(S3Repository.TYPE)
.setVerify(verify)
.setSettings(Settings.builder()
.put(S3Repository.BUCKET_SETTING.getKey(), bucket)
.put(S3Repository.CLIENT_NAME.getKey(), client)
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
@ -96,6 +100,11 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret")));
}
@Override
protected void afterCreationCheck(Repository repository) {
assertThat(repository, instanceOf(S3Repository.class));
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(TestS3RepositoryPlugin.class);
@ -125,7 +134,7 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
public void testInsecureRepositoryCredentials() throws Exception {
final String repositoryName = "testInsecureRepositoryCredentials";
createTestRepository(repositoryName);
createAndCheckTestRepository(repositoryName);
final NodeClient nodeClient = internalCluster().getInstance(NodeClient.class);
final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(Settings.EMPTY, mock(RestController.class),
internalCluster().getInstance(SettingsFilter.class));

View File

@ -29,11 +29,13 @@ import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
public class S3RepositoryTests extends ESTestCase {
@ -70,27 +72,27 @@ public class S3RepositoryTests extends ESTestCase {
}
}
public void testInvalidChunkBufferSizeSettings() throws IOException {
public void testInvalidChunkBufferSizeSettings() {
// chunk < buffer should fail
final Settings s1 = bufferAndChunkSettings(10, 5);
final Exception e1 = expectThrows(RepositoryException.class,
() -> new S3Repository(getRepositoryMetaData(s1), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()));
() -> createS3Repo(getRepositoryMetaData(s1)));
assertThat(e1.getMessage(), containsString("chunk_size (5mb) can't be lower than buffer_size (10mb)"));
// chunk > buffer should pass
final Settings s2 = bufferAndChunkSettings(5, 10);
new S3Repository(getRepositoryMetaData(s2), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close();
createS3Repo(getRepositoryMetaData(s2)).close();
// chunk = buffer should pass
final Settings s3 = bufferAndChunkSettings(5, 5);
new S3Repository(getRepositoryMetaData(s3), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close();
createS3Repo(getRepositoryMetaData(s3)).close();
// buffer < 5mb should fail
final Settings s4 = bufferAndChunkSettings(4, 10);
final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class,
() -> new S3Repository(getRepositoryMetaData(s4), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())
() -> createS3Repo(getRepositoryMetaData(s4))
.close());
assertThat(e2.getMessage(), containsString("failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"));
final Settings s5 = bufferAndChunkSettings(5, 6000000);
final IllegalArgumentException e3 = expectThrows(IllegalArgumentException.class,
() -> new S3Repository(getRepositoryMetaData(s5), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())
() -> createS3Repo(getRepositoryMetaData(s5))
.close());
assertThat(e3.getMessage(), containsString("failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"));
}
@ -106,20 +108,32 @@ public class S3RepositoryTests extends ESTestCase {
return new RepositoryMetaData("dummy-repo", "mock", Settings.builder().put(settings).build());
}
public void testBasePathSetting() throws IOException {
public void testBasePathSetting() {
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build());
try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) {
try (S3Repository s3repo = createS3Repo(metadata)) {
assertEquals("foo/bar/", s3repo.basePath().buildAsString());
}
}
public void testDefaultBufferSize() throws IOException {
public void testDefaultBufferSize() {
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY);
try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) {
final long defaultBufferSize = ((S3BlobStore) s3repo.blobStore()).bufferSizeInBytes();
try (S3Repository s3repo = createS3Repo(metadata)) {
assertThat(s3repo.getBlobStore(), is(nullValue()));
s3repo.start();
final long defaultBufferSize = ((S3BlobStore)s3repo.blobStore()).bufferSizeInBytes();
assertThat(s3repo.getBlobStore(), not(nullValue()));
assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024));
assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024));
}
}
private S3Repository createS3Repo(RepositoryMetaData metadata) {
return new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo manually on test/main threads
}
};
}
}

View File

@ -0,0 +1,141 @@
---
"two levels fields collapsing":
- skip:
version: " - 6.99.99"
reason: using multiple field collapsing from 7.0 on
- do:
indices.create:
index: addresses
body:
settings:
number_of_shards: 1
number_of_replicas: 1
mappings:
_doc:
properties:
country: {"type": "keyword"}
city: {"type": "keyword"}
address: {"type": "text"}
- do:
bulk:
refresh: true
body:
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "1" } }'
- '{"country" : "Canada", "city" : "Saskatoon", "address" : "701 Victoria Avenue" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "2" } }'
- '{"country" : "Canada", "city" : "Toronto", "address" : "74 Victoria Street, Suite, 74 Victoria Street, Suite 300" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "3" } }'
- '{"country" : "Canada", "city" : "Toronto", "address" : "350 Victoria St" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "4" } }'
- '{"country" : "Canada", "city" : "Toronto", "address" : "20 Victoria Street" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "5" } }'
- '{"country" : "UK", "city" : "London", "address" : "58 Victoria Street" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "6" } }'
- '{"country" : "UK", "city" : "London", "address" : "Victoria Street Victoria Palace Theatre" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "7" } }'
- '{"country" : "UK", "city" : "Manchester", "address" : "75 Victoria street Westminster" }'
- '{ "index" : { "_index" : "addresses", "_type" : "_doc", "_id" : "8" } }'
- '{"country" : "UK", "city" : "London", "address" : "Victoria Station Victoria Arcade" }'
# ************* error if internal collapse contains inner_hits
- do:
catch: /parse_exception/
search:
index: addresses
body:
query: { "match" : { "address" : "victoria" }}
collapse:
field: country
inner_hits:
collapse:
field : city
inner_hits: {}
# ************* error if internal collapse contains another collapse
- do:
catch: /parse_exception/
search:
index: addresses
body:
query: { "match" : { "address" : "victoria" }}
collapse:
field: country
inner_hits:
collapse:
field : city
collapse: { field: city }
# ************* top scored
- do:
search:
index: addresses
body:
query: { "match" : { "address" : "victoria" }}
collapse:
field: country
inner_hits:
name: by_location
size: 3
collapse:
field : city
- match: { hits.total: 8 }
- length: { hits.hits: 2 }
- match: { hits.hits.0.fields.country: ["UK"] }
- match: { hits.hits.0.inner_hits.by_location.hits.total: 4 }
# 2 inner hits returned instead of requested 3 as they are collapsed by city
- length: { hits.hits.0.inner_hits.by_location.hits.hits : 2}
- match: { hits.hits.0.inner_hits.by_location.hits.hits.0._id: "8" }
- match: { hits.hits.0.inner_hits.by_location.hits.hits.0.fields.city: ["London"] }
- match: { hits.hits.0.inner_hits.by_location.hits.hits.1._id: "7" }
- match: { hits.hits.0.inner_hits.by_location.hits.hits.1.fields.city: ["Manchester"] }
- match: { hits.hits.1.fields.country: ["Canada"] }
- match: { hits.hits.1.inner_hits.by_location.hits.total: 4 }
# 2 inner hits returned instead of requested 3 as they are collapsed by city
- length: { hits.hits.1.inner_hits.by_location.hits.hits : 2 }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.0._id: "1" }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.0.fields.city: ["Saskatoon"] }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.1._id: "3" }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.1.fields.city: ["Toronto"] }
# ************* sorted
- do:
search:
index: addresses
body:
query: { "match" : { "address" : "victoria" }}
collapse:
field: country
inner_hits:
name: by_location
size: 3
sort: [{ "city": "desc" }]
collapse:
field : city
- match: { hits.total: 8 }
- length: { hits.hits: 2 }
- match: { hits.hits.0.fields.country: ["UK"] }
- match: { hits.hits.0.inner_hits.by_location.hits.total: 4 }
# 2 inner hits returned instead of requested 3 as they are collapsed by city
- length: { hits.hits.0.inner_hits.by_location.hits.hits : 2}
- match: { hits.hits.0.inner_hits.by_location.hits.hits.0._id: "7" }
- match: { hits.hits.0.inner_hits.by_location.hits.hits.0.fields.city: ["Manchester"] }
- match: { hits.hits.0.inner_hits.by_location.hits.hits.1._id: "5" }
- match: { hits.hits.0.inner_hits.by_location.hits.hits.1.fields.city: ["London"] }
- match: { hits.hits.1.fields.country: ["Canada"] }
- match: { hits.hits.1.inner_hits.by_location.hits.total: 4 }
# 2 inner hits returned instead of requested 3 as they are collapsed by city
- length: { hits.hits.1.inner_hits.by_location.hits.hits : 2 }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.0._id: "2" }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.0.fields.city: ["Toronto"] }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.1._id: "1" }
- match: { hits.hits.1.inner_hits.by_location.hits.hits.1.fields.city: ["Saskatoon"] }

View File

@ -42,9 +42,9 @@ import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.Strings.EMPTY_ARRAY;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
/**
@ -433,8 +433,6 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
if (indicesOptions != null) {
indicesOptions.toXContent(builder, params);
}
builder.field("wait_for_completion", waitForCompletion);
builder.field("master_node_timeout", masterNodeTimeout.toString());
builder.endObject();
return builder;
}

View File

@ -21,14 +21,16 @@ package org.elasticsearch.action.admin.cluster.snapshots.create;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotInfo.SnapshotInfoBuilder;
import java.io.IOException;
import java.util.Objects;
@ -38,6 +40,14 @@ import java.util.Objects;
*/
public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject {
private static final ObjectParser<CreateSnapshotResponse, Void> PARSER =
new ObjectParser<>(CreateSnapshotResponse.class.getName(), true, CreateSnapshotResponse::new);
static {
PARSER.declareObject(CreateSnapshotResponse::setSnapshotInfoFromBuilder,
SnapshotInfo.SNAPSHOT_INFO_PARSER, new ParseField("snapshot"));
}
@Nullable
private SnapshotInfo snapshotInfo;
@ -48,8 +58,8 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent
CreateSnapshotResponse() {
}
void setSnapshotInfo(SnapshotInfo snapshotInfo) {
this.snapshotInfo = snapshotInfo;
private void setSnapshotInfoFromBuilder(SnapshotInfoBuilder snapshotInfoBuilder) {
this.snapshotInfo = snapshotInfoBuilder.build();
}
/**
@ -101,38 +111,8 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent
return builder;
}
public static CreateSnapshotResponse fromXContent(XContentParser parser) throws IOException {
CreateSnapshotResponse createSnapshotResponse = new CreateSnapshotResponse();
parser.nextToken(); // move to '{'
if (parser.currentToken() != Token.START_OBJECT) {
throw new IllegalArgumentException("unexpected token [" + parser.currentToken() + "], expected ['{']");
}
parser.nextToken(); // move to 'snapshot' || 'accepted'
if ("snapshot".equals(parser.currentName())) {
createSnapshotResponse.snapshotInfo = SnapshotInfo.fromXContent(parser);
} else if ("accepted".equals(parser.currentName())) {
parser.nextToken(); // move to 'accepted' field value
if (parser.booleanValue()) {
// ensure accepted is a boolean value
}
parser.nextToken(); // move past 'true'/'false'
} else {
throw new IllegalArgumentException("unexpected token [" + parser.currentToken() + "] expected ['snapshot', 'accepted']");
}
if (parser.currentToken() != Token.END_OBJECT) {
throw new IllegalArgumentException("unexpected token [" + parser.currentToken() + "], expected ['}']");
}
parser.nextToken(); // move past '}'
return createSnapshotResponse;
public static CreateSnapshotResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override

View File

@ -87,7 +87,8 @@ final class ExpandSearchPhase extends SearchPhase {
groupQuery.must(origQuery);
}
for (InnerHitBuilder innerHitBuilder : innerHitBuilders) {
SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder)
CollapseBuilder innerCollapseBuilder = innerHitBuilder.getInnerCollapseBuilder();
SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder)
.query(groupQuery)
.postFilter(searchRequest.source().postFilter());
SearchRequest groupRequest = buildExpandSearchRequest(searchRequest, sourceBuilder);
@ -135,7 +136,7 @@ final class ExpandSearchPhase extends SearchPhase {
return groupRequest;
}
private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options) {
private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) {
SearchSourceBuilder groupSource = new SearchSourceBuilder();
groupSource.from(options.getFrom());
groupSource.size(options.getSize());
@ -167,6 +168,9 @@ final class ExpandSearchPhase extends SearchPhase {
groupSource.explain(options.isExplain());
groupSource.trackScores(options.isTrackScores());
groupSource.version(options.isVersion());
if (innerCollapseBuilder != null) {
groupSource.collapse(innerCollapseBuilder);
}
return groupSource;
}
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.support;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestRequest;
@ -316,21 +317,6 @@ public class IndicesOptions implements ToXContentFragment {
defaultSettings);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("expand_wildcards");
for (WildcardStates expandWildcard : expandWildcards) {
builder.value(expandWildcard.toString().toLowerCase(Locale.ROOT));
}
builder.endArray();
builder.field("ignore_unavailable", ignoreUnavailable());
builder.field("allow_no_indices", allowNoIndices());
builder.field("forbid_aliases_to_multiple_indices", allowAliasesToMultipleIndices() == false);
builder.field("forbid_closed_indices", forbidClosedIndices());
builder.field("ignore_aliases", ignoreAliases());
return builder;
}
/**
* Returns true if the name represents a valid name for one of the indices option
* false otherwise
@ -360,6 +346,18 @@ public class IndicesOptions implements ToXContentFragment {
);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startArray("expand_wildcards");
for (WildcardStates expandWildcard : expandWildcards) {
builder.value(expandWildcard.toString().toLowerCase(Locale.ROOT));
}
builder.endArray();
builder.field("ignore_unavailable", ignoreUnavailable());
builder.field("allow_no_indices", allowNoIndices());
return builder;
}
/**
* @return indices options that requires every specified index to exist, expands wildcards only to open indices and
* allows that no indices are resolved from wildcard expressions (not returning an error).

View File

@ -125,7 +125,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
// Additionally, we need to check that we haven't exceeded the parent's limit
try {
parent.checkParentLimit(label);
parent.checkParentLimit((long) (bytes * overheadConstant), label);
} catch (CircuitBreakingException e) {
// If the parent breaker is tripped, this breaker has to be
// adjusted back down because the allocation is "blocked" but the

View File

@ -254,6 +254,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS,
HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE,
HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE,
HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,

View File

@ -37,6 +37,7 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFor
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.collapse.CollapseBuilder;
import java.io.IOException;
import java.util.ArrayList;
@ -55,6 +56,8 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
public static final ParseField NAME_FIELD = new ParseField("name");
public static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped");
public static final QueryBuilder DEFAULT_INNER_HIT_QUERY = new MatchAllQueryBuilder();
public static final ParseField COLLAPSE_FIELD = new ParseField("collapse");
public static final ParseField FIELD_FIELD = new ParseField("field");
private static final ObjectParser<InnerHitBuilder, Void> PARSER = new ObjectParser<>("inner_hits", InnerHitBuilder::new);
@ -91,6 +94,28 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
}, SearchSourceBuilder._SOURCE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING);
PARSER.declareObject(InnerHitBuilder::setHighlightBuilder, (p, c) -> HighlightBuilder.fromXContent(p),
SearchSourceBuilder.HIGHLIGHT_FIELD);
PARSER.declareField((parser, builder, context) -> {
Boolean isParsedCorrectly = false;
String field;
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
if (FIELD_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) {
if (parser.nextToken() == XContentParser.Token.VALUE_STRING){
field = parser.text();
if (parser.nextToken() == XContentParser.Token.END_OBJECT){
isParsedCorrectly = true;
CollapseBuilder cb = new CollapseBuilder(field);
builder.setInnerCollapse(cb);
}
}
}
}
}
if (isParsedCorrectly == false) {
throw new ParsingException(parser.getTokenLocation(), "Invalid token in the inner collapse");
}
}, COLLAPSE_FIELD, ObjectParser.ValueType.OBJECT);
}
private String name;
@ -109,6 +134,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
private Set<ScriptField> scriptFields;
private HighlightBuilder highlightBuilder;
private FetchSourceContext fetchSourceContext;
private CollapseBuilder innerCollapseBuilder = null;
public InnerHitBuilder() {
this.name = null;
@ -173,6 +199,9 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
boolean hasChildren = in.readBoolean();
assert hasChildren == false;
}
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new);
}
}
@Override
@ -218,6 +247,9 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
}
}
out.writeOptionalWriteable(highlightBuilder);
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
out.writeOptionalWriteable(innerCollapseBuilder);
}
}
/**
@ -501,6 +533,15 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
return query;
}
public InnerHitBuilder setInnerCollapse(CollapseBuilder innerCollapseBuilder) {
this.innerCollapseBuilder = innerCollapseBuilder;
return this;
}
public CollapseBuilder getInnerCollapseBuilder() {
return innerCollapseBuilder;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@ -550,6 +591,9 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
if (highlightBuilder != null) {
builder.field(SearchSourceBuilder.HIGHLIGHT_FIELD.getPreferredName(), highlightBuilder, params);
}
if (innerCollapseBuilder != null) {
builder.field(COLLAPSE_FIELD.getPreferredName(), innerCollapseBuilder);
}
builder.endObject();
return builder;
}
@ -572,13 +616,14 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
Objects.equals(scriptFields, that.scriptFields) &&
Objects.equals(fetchSourceContext, that.fetchSourceContext) &&
Objects.equals(sorts, that.sorts) &&
Objects.equals(highlightBuilder, that.highlightBuilder);
Objects.equals(highlightBuilder, that.highlightBuilder) &&
Objects.equals(innerCollapseBuilder, that.innerCollapseBuilder);
}
@Override
public int hashCode() {
return Objects.hash(name, ignoreUnmapped, from, size, explain, version, trackScores,
storedFieldsContext, docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder);
storedFieldsContext, docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, innerCollapseBuilder);
}
public static InnerHitBuilder fromXContent(XContentParser parser) throws IOException {
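Editor's note: the new inner `collapse` object can also be set programmatically through the builder API added above. A minimal sketch, assuming the Elasticsearch server classes are on the classpath; "user_id" is a hypothetical field name used only for illustration:

import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.search.collapse.CollapseBuilder;

public class InnerCollapseSketch {
    public static void main(String[] args) {
        // Mirrors the JSON the parser above accepts: "collapse": { "field": "user_id" }
        InnerHitBuilder innerHits = new InnerHitBuilder()
            .setInnerCollapse(new CollapseBuilder("user_id"));
        System.out.println(innerHits.getInnerCollapseBuilder().getField()); // prints user_id
    }
}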

View File

@ -54,7 +54,7 @@ import java.util.Locale;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.plugins.AnalysisPlugin.requriesAnalysisSettings;
import static org.elasticsearch.plugins.AnalysisPlugin.requiresAnalysisSettings;
/**
* Sets up {@link AnalysisRegistry}.
@ -118,7 +118,7 @@ public final class AnalysisModule {
tokenFilters.register("stop", StopTokenFilterFactory::new);
tokenFilters.register("standard", StandardTokenFilterFactory::new);
tokenFilters.register("shingle", ShingleTokenFilterFactory::new);
tokenFilters.register("hunspell", requriesAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory
tokenFilters.register("hunspell", requiresAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory
(indexSettings, name, settings, hunspellService)));
tokenFilters.extractAndRegister(plugins, AnalysisPlugin::getTokenFilters);

View File

@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
@ -44,10 +46,21 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
private static final String CHILD_LOGGER_PREFIX = "org.elasticsearch.indices.breaker.";
private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean();
private final ConcurrentMap<String, CircuitBreaker> breakers = new ConcurrentHashMap<>();
public static final Setting<Boolean> USE_REAL_MEMORY_USAGE_SETTING =
Setting.boolSetting("indices.breaker.total.use_real_memory", true, Property.NodeScope);
public static final Setting<ByteSizeValue> TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING =
Setting.memorySizeSetting("indices.breaker.total.limit", "70%", Property.Dynamic, Property.NodeScope);
Setting.memorySizeSetting("indices.breaker.total.limit", settings -> {
if (USE_REAL_MEMORY_USAGE_SETTING.get(settings)) {
return "95%";
} else {
return "70%";
}
}, Property.Dynamic, Property.NodeScope);
public static final Setting<ByteSizeValue> FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING =
Setting.memorySizeSetting("indices.breaker.fielddata.limit", "60%", Property.Dynamic, Property.NodeScope);
@ -77,6 +90,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
public static final Setting<CircuitBreaker.Type> IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING =
new Setting<>("network.breaker.inflight_requests.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope);
private final boolean trackRealMemoryUsage;
private volatile BreakerSettings parentSettings;
private volatile BreakerSettings fielddataSettings;
private volatile BreakerSettings inFlightRequestsSettings;
@ -120,6 +134,8 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
logger.trace("parent circuit breaker with settings {}", this.parentSettings);
}
this.trackRealMemoryUsage = USE_REAL_MEMORY_USAGE_SETTING.get(settings);
registerBreaker(this.requestSettings);
registerBreaker(this.fielddataSettings);
registerBreaker(this.inFlightRequestsSettings);
@ -191,17 +207,15 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
@Override
public AllCircuitBreakerStats stats() {
long parentEstimated = 0;
List<CircuitBreakerStats> allStats = new ArrayList<>(this.breakers.size());
// Gather the "estimated" count for the parent breaker by adding the
// estimations for each individual breaker
for (CircuitBreaker breaker : this.breakers.values()) {
allStats.add(stats(breaker.getName()));
parentEstimated += breaker.getUsed();
}
// Manually add the parent breaker settings since they aren't part of the breaker map
allStats.add(new CircuitBreakerStats(CircuitBreaker.PARENT, parentSettings.getLimit(),
parentEstimated, 1.0, parentTripCount.get()));
parentUsed(0L), 1.0, parentTripCount.get()));
return new AllCircuitBreakerStats(allStats.toArray(new CircuitBreakerStats[allStats.size()]));
}
@ -211,15 +225,28 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
return new CircuitBreakerStats(breaker.getName(), breaker.getLimit(), breaker.getUsed(), breaker.getOverhead(), breaker.getTrippedCount());
}
private long parentUsed(long newBytesReserved) {
if (this.trackRealMemoryUsage) {
return currentMemoryUsage() + newBytesReserved;
} else {
long parentEstimated = 0;
for (CircuitBreaker breaker : this.breakers.values()) {
parentEstimated += breaker.getUsed() * breaker.getOverhead();
}
return parentEstimated;
}
}
//package private to allow overriding it in tests
long currentMemoryUsage() {
return MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed();
}
/**
* Checks whether the parent breaker has been tripped
*/
public void checkParentLimit(String label) throws CircuitBreakingException {
long totalUsed = 0;
for (CircuitBreaker breaker : this.breakers.values()) {
totalUsed += (breaker.getUsed() * breaker.getOverhead());
}
public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
long totalUsed = parentUsed(newBytesReserved);
long parentLimit = this.parentSettings.getLimit();
if (totalUsed > parentLimit) {
this.parentTripCount.incrementAndGet();
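Editor's note: the default parent limit now depends on the new `indices.breaker.total.use_real_memory` setting, 95% of the heap when real-memory tracking is on (the default) and the previous 70% otherwise. A minimal sketch that resolves the setting against two Settings objects, assuming the server jar on the classpath:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;

public class BreakerDefaultsSketch {
    public static void main(String[] args) {
        // Real-memory tracking is the default, so the parent limit resolves to 95% of the heap.
        System.out.println(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(Settings.EMPTY));

        // Opting out of real-memory tracking falls back to the previous 70% default.
        Settings estimateOnly = Settings.builder()
            .put("indices.breaker.total.use_real_memory", false)
            .build();
        System.out.println(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(estimateOnly));
    }
}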

View File

@ -57,12 +57,12 @@ import static java.util.Collections.emptyMap;
* }</pre>
*
* Elasticsearch doesn't have any automatic mechanism to share these components between indexes. If any component is heavy enough to warrant
* such sharing then it is the Pugin's responsibility to do it in their {@link AnalysisProvider} implementation. We recommend against doing
* such sharing then it is the Plugin's responsibility to do it in their {@link AnalysisProvider} implementation. We recommend against doing
* this unless absolutely necessary because it can be difficult to get the caching right given things like behavior changes across versions.
*/
public interface AnalysisPlugin {
/**
* Override to add additional {@link CharFilter}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* Override to add additional {@link CharFilter}s. See {@link #requiresAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
@ -70,7 +70,7 @@ public interface AnalysisPlugin {
}
/**
* Override to add additional {@link TokenFilter}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* Override to add additional {@link TokenFilter}s. See {@link #requiresAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
@ -78,7 +78,7 @@ public interface AnalysisPlugin {
}
/**
* Override to add additional {@link Tokenizer}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* Override to add additional {@link Tokenizer}s. See {@link #requiresAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
@ -86,7 +86,7 @@ public interface AnalysisPlugin {
}
/**
* Override to add additional {@link Analyzer}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* Override to add additional {@link Analyzer}s. See {@link #requiresAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
@ -131,7 +131,7 @@ public interface AnalysisPlugin {
/**
* Mark an {@link AnalysisProvider} as requiring the index's settings.
*/
static <T> AnalysisProvider<T> requriesAnalysisSettings(AnalysisProvider<T> provider) {
static <T> AnalysisProvider<T> requiresAnalysisSettings(AnalysisProvider<T> provider) {
return new AnalysisProvider<T>() {
@Override
public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
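Editor's note: with the typo fixed, plugin authors call `requiresAnalysisSettings` to mark a provider as needing the index's own settings. A minimal sketch of a plugin registering a pass-through token filter this way; the "my_filter" name and the anonymous factory are illustrative only, not part of this change:

import static java.util.Collections.singletonMap;
import static org.elasticsearch.plugins.AnalysisPlugin.requiresAnalysisSettings;

import java.util.Map;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class MyAnalysisPlugin extends Plugin implements AnalysisPlugin {
    @Override
    public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
        // Wrapping the provider makes requiresAnalysisSettings() return true,
        // so the factory receives the index's settings instead of the node defaults.
        return singletonMap("my_filter", requiresAnalysisSettings((indexSettings, env, name, settings) ->
            new TokenFilterFactory() {
                @Override
                public String name() {
                    return name;
                }

                @Override
                public TokenStream create(TokenStream tokenStream) {
                    return tokenStream; // a real factory would read `settings` here
                }
            }));
    }
}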

View File

@ -38,6 +38,7 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.RestoreService;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -58,16 +59,20 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
private final ClusterService clusterService;
private final ThreadPool threadPool;
private final VerifyNodeRepositoryAction verifyAction;
private volatile Map<String, Repository> repositories = Collections.emptyMap();
@Inject
public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService,
Map<String, Repository.Factory> typesRegistry) {
Map<String, Repository.Factory> typesRegistry,
ThreadPool threadPool) {
super(settings);
this.typesRegistry = typesRegistry;
this.clusterService = clusterService;
this.threadPool = threadPool;
// Doesn't make sense to maintain repositories on non-master and non-data nodes
// Nothing happens there anyway
if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) {
@ -208,39 +213,51 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
public void verifyRepository(final String repositoryName, final ActionListener<VerifyResponse> listener) {
final Repository repository = repository(repositoryName);
try {
final String verificationToken = repository.startVerification();
if (verificationToken != null) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
verifyAction.verify(repositoryName, verificationToken, new ActionListener<VerifyResponse>() {
@Override
public void onResponse(VerifyResponse verifyResponse) {
try {
repository.endVerification(verificationToken);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e);
listener.onFailure(e);
return;
}
listener.onResponse(verifyResponse);
}
final String verificationToken = repository.startVerification();
if (verificationToken != null) {
try {
verifyAction.verify(repositoryName, verificationToken, new ActionListener<VerifyResponse>() {
@Override
public void onResponse(VerifyResponse verifyResponse) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
repository.endVerification(verificationToken);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage(
"[{}] failed to finish repository verification", repositoryName), e);
listener.onFailure(e);
return;
}
listener.onResponse(verifyResponse);
});
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
} catch (Exception e) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
repository.endVerification(verificationToken);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(() -> new ParameterizedMessage(
"[{}] failed to finish repository verification", repositoryName), inner);
}
listener.onFailure(e);
});
}
});
} catch (Exception e) {
try {
repository.endVerification(verificationToken);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner);
} else {
listener.onResponse(new VerifyResponse(new DiscoveryNode[0], new VerificationFailure[0]));
}
} catch (Exception e) {
listener.onFailure(e);
}
} else {
listener.onResponse(new VerifyResponse(new DiscoveryNode[0], new VerificationFailure[0]));
}
});
} catch (Exception e) {
listener.onFailure(e);
}
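Editor's note: the restructured verifyRepository above starts verification on the SNAPSHOT thread pool and also finishes endVerification there, so neither step blocks the calling transport thread, while failures are still routed to the listener. A generic sketch of that off-loading pattern using plain executors; it only illustrates the control flow and is not the Elasticsearch code itself:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

public class OffloadedVerificationSketch {
    public static void verify(ExecutorService snapshotPool, Runnable startVerification,
                              Runnable endVerification, Consumer<String> onResponse, Consumer<Exception> onFailure) {
        snapshotPool.execute(() -> {                 // start on the snapshot pool, not the caller's thread
            try {
                startVerification.run();
                snapshotPool.execute(() -> {         // finish on the snapshot pool as well
                    try {
                        endVerification.run();
                    } catch (Exception e) {
                        onFailure.accept(e);         // cleanup failures are reported, not swallowed
                        return;
                    }
                    onResponse.accept("verified");
                });
            } catch (Exception e) {
                onFailure.accept(e);                 // failures always reach the listener
            }
        });
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        verify(pool, () -> {}, () -> {}, System.out::println, Throwable::printStackTrace);
        pool.shutdown();
    }
}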

View File

@ -61,7 +61,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
this.transportService = transportService;
this.clusterService = clusterService;
this.repositoriesService = repositoriesService;
transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SAME, new VerifyNodeRepositoryRequestHandler());
transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SNAPSHOT, new VerifyNodeRepositoryRequestHandler());
}
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {

View File

@ -34,6 +34,7 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException;
@ -102,6 +103,7 @@ import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.FilterInputStream;
import java.io.IOException;
@ -126,8 +128,8 @@ import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSna
/**
* BlobStore - based implementation of Snapshot Repository
* <p>
* This repository works with any {@link BlobStore} implementation. The blobStore should be initialized in the derived
* class before {@link #doStart()} is called.
* This repository works with any {@link BlobStore} implementation. The blobStore should preferably be initialized lazily in
* {@link #createBlobStore()}.
* <p>
* BlobStoreRepository maintains the following structure in the blob store
* <pre>
@ -169,8 +171,6 @@ import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSna
*/
public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository {
private BlobContainer snapshotsBlobContainer;
protected final RepositoryMetaData metadata;
protected final NamedXContentRegistry namedXContentRegistry;
@ -225,6 +225,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private final ChecksumBlobStoreFormat<BlobStoreIndexShardSnapshots> indexShardSnapshotsFormat;
private final Object lock = new Object();
private final SetOnce<BlobContainer> blobContainer = new SetOnce<>();
private final SetOnce<BlobStore> blobStore = new SetOnce<>();
/**
* Constructs new BlobStoreRepository
*
@ -251,7 +257,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
protected void doStart() {
this.snapshotsBlobContainer = blobStore().blobContainer(basePath());
globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT,
MetaData::fromXContent, namedXContentRegistry, isCompress());
indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT,
@ -265,17 +270,82 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
protected void doClose() {
try {
blobStore().close();
} catch (Exception t) {
logger.warn("cannot close blob store", t);
BlobStore store;
// close the blobStore even if its initialization started while the repository is closing
synchronized (lock) {
store = blobStore.get();
}
if (store != null) {
try {
store.close();
} catch (Exception t) {
logger.warn("cannot close blob store", t);
}
}
}
// package private, only use for testing
BlobContainer getBlobContainer() {
return blobContainer.get();
}
// for test purposes only
protected BlobStore getBlobStore() {
return blobStore.get();
}
/**
* Returns the BlobStore to read and write data.
* Maintains a single lazy instance of {@link BlobContainer}
*/
protected abstract BlobStore blobStore();
protected BlobContainer blobContainer() {
assertSnapshotOrGenericThread();
BlobContainer blobContainer = this.blobContainer.get();
if (blobContainer == null) {
synchronized (lock) {
blobContainer = this.blobContainer.get();
if (blobContainer == null) {
blobContainer = blobStore().blobContainer(basePath());
this.blobContainer.set(blobContainer);
}
}
}
return blobContainer;
}
/**
* Maintains a single lazy instance of {@link BlobStore}
*/
protected BlobStore blobStore() {
assertSnapshotOrGenericThread();
BlobStore store = blobStore.get();
if (store == null) {
synchronized (lock) {
store = blobStore.get();
if (store == null) {
if (lifecycle.started() == false) {
throw new RepositoryException(metadata.name(), "repository is not in started state");
}
try {
store = createBlobStore();
} catch (RepositoryException e) {
throw e;
} catch (Exception e) {
throw new RepositoryException(metadata.name(), "cannot create blob store", e);
}
blobStore.set(store);
}
}
}
return store;
}
/**
* Creates new BlobStore to read and write data.
*/
protected abstract BlobStore createBlobStore() throws Exception;
/**
* Returns base path of the repository
@ -319,12 +389,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists");
}
if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {
if (snapshotFormat.exists(blobContainer(), snapshotId.getUUID())) {
throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists");
}
// Write Global MetaData
globalMetaDataFormat.write(clusterMetaData, snapshotsBlobContainer, snapshotId.getUUID());
globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID());
// write the index metadata for each index in the snapshot
for (IndexId index : indices) {
@ -421,7 +491,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final String blobId) {
try {
snapshotFormat.delete(snapshotsBlobContainer, blobId);
snapshotFormat.delete(blobContainer(), blobId);
} catch (IOException e) {
if (snapshotInfo != null) {
logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]",
@ -434,7 +504,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final String blobId) {
try {
globalMetaDataFormat.delete(snapshotsBlobContainer, blobId);
globalMetaDataFormat.delete(blobContainer(), blobId);
} catch (IOException e) {
if (snapshotInfo != null) {
logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]",
@ -472,7 +542,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
startTime, failure, System.currentTimeMillis(), totalShards, shardFailures,
includeGlobalState);
try {
snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, snapshotId.getUUID());
snapshotFormat.write(blobStoreSnapshot, blobContainer(), snapshotId.getUUID());
final RepositoryData repositoryData = getRepositoryData();
writeIndexGen(repositoryData.addSnapshot(snapshotId, blobStoreSnapshot.state(), indices), repositoryStateId);
} catch (FileAlreadyExistsException ex) {
@ -490,7 +560,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) {
try {
return snapshotFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
return snapshotFormat.read(blobContainer(), snapshotId.getUUID());
} catch (NoSuchFileException ex) {
throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
} catch (IOException | NotXContentException ex) {
@ -501,7 +571,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
public MetaData getSnapshotGlobalMetaData(final SnapshotId snapshotId) {
try {
return globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
return globalMetaDataFormat.read(blobContainer(), snapshotId.getUUID());
} catch (NoSuchFileException ex) {
throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
} catch (IOException ex) {
@ -543,11 +613,21 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
return restoreRateLimitingTimeInNanos.count();
}
protected void assertSnapshotOrGenericThread() {
assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT)
|| Thread.currentThread().getName().contains(ThreadPool.Names.GENERIC) :
"Expected current thread [" + Thread.currentThread() + "] to be the snapshot or generic thread.";
}
@Override
public String startVerification() {
try {
if (isReadOnly()) {
// It's readonly - so there is not much we can do here to verify it
// TODO: add repository verification for read-only repositories
// It's readonly - so there is not much we can do here to verify it, apart from trying to create the blobStore()
// and checking that it is accessible on the master
blobStore();
return null;
} else {
String seed = UUIDs.randomBase64UUID();
@ -584,7 +664,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
final String snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen);
RepositoryData repositoryData;
try (InputStream blob = snapshotsBlobContainer.readBlob(snapshotsIndexBlobName)) {
try (InputStream blob = blobContainer().readBlob(snapshotsIndexBlobName)) {
BytesStreamOutput out = new BytesStreamOutput();
Streams.copy(blob, out);
// EMPTY is safe here because RepositoryData#fromXContent calls namedObject
@ -598,7 +678,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
// now load the incompatible snapshot ids, if they exist
try (InputStream blob = snapshotsBlobContainer.readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
try (InputStream blob = blobContainer().readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
BytesStreamOutput out = new BytesStreamOutput();
Streams.copy(blob, out);
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY,
@ -636,11 +716,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
return readOnly;
}
// package private, only use for testing
BlobContainer blobContainer() {
return snapshotsBlobContainer;
}
protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException {
assert isReadOnly() == false; // can not write to a read only repository
final long currentGen = latestIndexBlobId();
@ -668,7 +743,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// delete the N-2 index file if it exists, keep the previous one around as a backup
if (isReadOnly() == false && newGen - 2 >= 0) {
final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2);
snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(oldSnapshotIndexFile);
blobContainer().deleteBlobIgnoringIfNotExists(oldSnapshotIndexFile);
}
// write the current generation to the index-latest file
@ -736,7 +811,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// package private for testing
long readSnapshotIndexLatestBlob() throws IOException {
try (InputStream blob = snapshotsBlobContainer.readBlob(INDEX_LATEST_BLOB)) {
try (InputStream blob = blobContainer().readBlob(INDEX_LATEST_BLOB)) {
BytesStreamOutput out = new BytesStreamOutput();
Streams.copy(blob, out);
return Numbers.bytesToLong(out.bytes().toBytesRef());
@ -744,7 +819,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
private long listBlobsToGetLatestIndexId() throws IOException {
Map<String, BlobMetaData> blobs = snapshotsBlobContainer.listBlobsByPrefix(INDEX_FILE_PREFIX);
Map<String, BlobMetaData> blobs = blobContainer().listBlobsByPrefix(INDEX_FILE_PREFIX);
long latest = RepositoryData.EMPTY_REPO_GEN;
if (blobs.isEmpty()) {
// no snapshot index blobs have been written yet
@ -766,7 +841,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private void writeAtomic(final String blobName, final BytesReference bytesRef, boolean failIfAlreadyExists) throws IOException {
try (InputStream stream = bytesRef.streamInput()) {
snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length(), failIfAlreadyExists);
blobContainer().writeBlobAtomic(blobName, stream, bytesRef.length(), failIfAlreadyExists);
}
}
@ -806,6 +881,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
public void verify(String seed, DiscoveryNode localNode) {
assertSnapshotOrGenericThread();
BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
if (testBlobContainer.blobExists("master.dat")) {
try {
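Editor's note: blobStore() and blobContainer() above use a SetOnce guarded by a lock, i.e. double-checked lazy initialization, so the store is created on first use (on a snapshot or generic thread) instead of in doStart(). A stand-alone sketch of that pattern with plain JDK types (AtomicReference stands in for Lucene's SetOnce), not the repository code itself:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public class LazyOnce<T> {
    private final AtomicReference<T> ref = new AtomicReference<>(); // stands in for SetOnce
    private final Object lock = new Object();
    private final Supplier<T> factory;

    public LazyOnce(Supplier<T> factory) {
        this.factory = factory;
    }

    public T get() {
        T value = ref.get();             // fast path: already initialized
        if (value == null) {
            synchronized (lock) {        // slow path: at most one thread creates the instance
                value = ref.get();
                if (value == null) {
                    value = factory.get();
                    ref.set(value);
                }
            }
        }
        return value;
    }

    public static void main(String[] args) {
        LazyOnce<String> blobStore = new LazyOnce<>(() -> "expensive blob store");
        System.out.println(blobStore.get()); // created here, on first access
        System.out.println(blobStore.get()); // reused afterwards
    }
}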

View File

@ -31,7 +31,6 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import java.io.IOException;
import java.nio.file.Path;
import java.util.function.Function;
@ -61,8 +60,7 @@ public class FsRepository extends BlobStoreRepository {
public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope);
public static final Setting<Boolean> REPOSITORIES_COMPRESS_SETTING =
Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope);
private final FsBlobStore blobStore;
private final Environment environment;
private ByteSizeValue chunkSize;
@ -74,37 +72,45 @@ public class FsRepository extends BlobStoreRepository {
* Constructs a shared file system repository.
*/
public FsRepository(RepositoryMetaData metadata, Environment environment,
NamedXContentRegistry namedXContentRegistry) throws IOException {
NamedXContentRegistry namedXContentRegistry) {
super(metadata, environment.settings(), namedXContentRegistry);
this.environment = environment;
String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
if (location.isEmpty()) {
logger.warn("the repository location is missing, it should point to a shared file system location that is available on all master and data nodes");
logger.warn("the repository location is missing, it should point to a shared file system location"
+ " that is available on all master and data nodes");
throw new RepositoryException(metadata.name(), "missing location");
}
Path locationFile = environment.resolveRepoFile(location);
if (locationFile == null) {
if (environment.repoFiles().length > 0) {
logger.warn("The specified location [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] ", location, environment.repoFiles());
throw new RepositoryException(metadata.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo");
logger.warn("The specified location [{}] doesn't start with any "
+ "repository paths specified by the path.repo setting: [{}] ", location, environment.repoFiles());
throw new RepositoryException(metadata.name(), "location [" + location
+ "] doesn't match any of the locations specified by path.repo");
} else {
logger.warn("The specified location [{}] should start with a repository path specified by the path.repo setting, but the path.repo setting was not set on this node", location);
throw new RepositoryException(metadata.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo because this setting is empty");
logger.warn("The specified location [{}] should start with a repository path specified by"
+ " the path.repo setting, but the path.repo setting was not set on this node", location);
throw new RepositoryException(metadata.name(), "location [" + location
+ "] doesn't match any of the locations specified by path.repo because this setting is empty");
}
}
blobStore = new FsBlobStore(settings, locationFile);
if (CHUNK_SIZE_SETTING.exists(metadata.settings())) {
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
} else {
this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings);
}
this.compress = COMPRESS_SETTING.exists(metadata.settings()) ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings);
this.compress = COMPRESS_SETTING.exists(metadata.settings())
? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings);
this.basePath = BlobPath.cleanPath();
}
@Override
protected BlobStore blobStore() {
return blobStore;
protected BlobStore createBlobStore() throws Exception {
final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
final Path locationFile = environment.resolveRepoFile(location);
return new FsBlobStore(settings, locationFile);
}
@Override

View File

@ -109,8 +109,10 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBu
import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing;
@ -395,6 +397,8 @@ public class SearchModule {
HistogramAggregationBuilder::parse).addResultReader(InternalHistogram::new));
registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder.NAME, DateHistogramAggregationBuilder::new,
DateHistogramAggregationBuilder::parse).addResultReader(InternalDateHistogram::new));
registerAggregation(new AggregationSpec(AutoDateHistogramAggregationBuilder.NAME, AutoDateHistogramAggregationBuilder::new,
AutoDateHistogramAggregationBuilder::parse).addResultReader(InternalAutoDateHistogram::new));
registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder.NAME, GeoDistanceAggregationBuilder::new,
GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new));
registerAggregation(new AggregationSpec(GeoGridAggregationBuilder.NAME, GeoGridAggregationBuilder::new,

View File

@ -84,6 +84,19 @@ public abstract class BucketsAggregator extends AggregatorBase {
subCollector.collect(doc, bucketOrd);
}
public final void mergeBuckets(long[] mergeMap, long newNumBuckets) {
try (IntArray oldDocCounts = docCounts) {
docCounts = bigArrays.newIntArray(newNumBuckets, true);
docCounts.fill(0, newNumBuckets, 0);
for (int i = 0; i < oldDocCounts.size(); i++) {
int docCount = oldDocCounts.get(i);
if (docCount != 0) {
docCounts.increment(mergeMap[i], docCount);
}
}
}
}
public IntArray getDocCounts() {
return docCounts;
}
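Editor's note: mergeBuckets folds the old per-ordinal doc counts into a smaller set of buckets, where mergeMap[oldOrd] gives the new ordinal. A tiny worked example with plain arrays, assuming nothing beyond the mapping semantics shown above:

public class MergeMapSketch {
    public static void main(String[] args) {
        long[] mergeMap = {0, 0, 1, 1, 2};        // old ordinals 0..4 collapse into new ordinals 0..2
        int[] oldDocCounts = {3, 2, 5, 1, 4};
        int[] newDocCounts = new int[3];
        for (int i = 0; i < oldDocCounts.length; i++) {
            if (oldDocCounts[i] != 0) {
                newDocCounts[(int) mergeMap[i]] += oldDocCounts[i]; // same accumulation as docCounts.increment(...)
            }
        }
        // newDocCounts is now [5, 6, 4]
        System.out.println(java.util.Arrays.toString(newDocCounts));
    }
}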

View File

@ -0,0 +1,236 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* A specialization of {@link DeferringBucketCollector} that collects all
* matches and then is able to replay a given subset of buckets. Exposes
* mergeBuckets, which can be invoked by the aggregator when increasing the
* rounding interval.
*/
public class MergingBucketsDeferringCollector extends DeferringBucketCollector {
List<Entry> entries = new ArrayList<>();
BucketCollector collector;
final SearchContext searchContext;
LeafReaderContext context;
PackedLongValues.Builder docDeltas;
PackedLongValues.Builder buckets;
long maxBucket = -1;
boolean finished = false;
LongHash selectedBuckets;
public MergingBucketsDeferringCollector(SearchContext context) {
this.searchContext = context;
}
@Override
public void setDeferredCollector(Iterable<BucketCollector> deferredCollectors) {
this.collector = BucketCollector.wrap(deferredCollectors);
}
@Override
public boolean needsScores() {
if (collector == null) {
throw new IllegalStateException();
}
return collector.needsScores();
}
@Override
public void preCollection() throws IOException {
collector.preCollection();
}
private void finishLeaf() {
if (context != null) {
entries.add(new Entry(context, docDeltas.build(), buckets.build()));
}
context = null;
docDeltas = null;
buckets = null;
}
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
finishLeaf();
context = ctx;
docDeltas = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
buckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
return new LeafBucketCollector() {
int lastDoc = 0;
@Override
public void collect(int doc, long bucket) {
docDeltas.add(doc - lastDoc);
buckets.add(bucket);
lastDoc = doc;
maxBucket = Math.max(maxBucket, bucket);
}
};
}
public void mergeBuckets(long[] mergeMap) {
List<Entry> newEntries = new ArrayList<>(entries.size());
for (Entry sourceEntry : entries) {
PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
for (PackedLongValues.Iterator itr = sourceEntry.buckets.iterator(); itr.hasNext();) {
long bucket = itr.next();
newBuckets.add(mergeMap[Math.toIntExact(bucket)]);
}
newEntries.add(new Entry(sourceEntry.context, sourceEntry.docDeltas, newBuckets.build()));
}
entries = newEntries;
// if there are buckets that have been collected in the current segment
// we need to update the bucket ordinals there too
if (buckets.size() > 0) {
PackedLongValues currentBuckets = buckets.build();
PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
for (PackedLongValues.Iterator itr = currentBuckets.iterator(); itr.hasNext();) {
long bucket = itr.next();
newBuckets.add(mergeMap[Math.toIntExact(bucket)]);
}
buckets = newBuckets;
}
}
@Override
public void postCollection() {
finishLeaf();
finished = true;
}
/**
* Replay the wrapped collector, but only on a selection of buckets.
*/
@Override
public void prepareSelectedBuckets(long... selectedBuckets) throws IOException {
if (finished == false) {
throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called");
}
if (this.selectedBuckets != null) {
throw new IllegalStateException("Already been replayed");
}
final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE);
for (long bucket : selectedBuckets) {
hash.add(bucket);
}
this.selectedBuckets = hash;
boolean needsScores = collector.needsScores();
Weight weight = null;
if (needsScores) {
weight = searchContext.searcher().createNormalizedWeight(searchContext.query(), true);
}
for (Entry entry : entries) {
final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
DocIdSetIterator docIt = null;
if (needsScores && entry.docDeltas.size() > 0) {
Scorer scorer = weight.scorer(entry.context);
// We don't need to check if the scorer is null
// since we are sure that there are documents to replay
// (entry.docDeltas is not empty).
docIt = scorer.iterator();
leafCollector.setScorer(scorer);
}
final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator();
final PackedLongValues.Iterator buckets = entry.buckets.iterator();
int doc = 0;
for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) {
doc += docDeltaIterator.next();
final long bucket = buckets.next();
final long rebasedBucket = hash.find(bucket);
if (rebasedBucket != -1) {
if (needsScores) {
if (docIt.docID() < doc) {
docIt.advance(doc);
}
// aggregations should only be replayed on matching
// documents
assert docIt.docID() == doc;
}
leafCollector.collect(doc, rebasedBucket);
}
}
}
collector.postCollection();
}
/**
* Wrap the provided aggregator so that it behaves (almost) as if it had
* been collected directly.
*/
@Override
public Aggregator wrap(final Aggregator in) {
return new WrappedAggregator(in) {
@Override
public InternalAggregation buildAggregation(long bucket) throws IOException {
if (selectedBuckets == null) {
throw new IllegalStateException("Collection has not been replayed yet.");
}
final long rebasedBucket = selectedBuckets.find(bucket);
if (rebasedBucket == -1) {
throw new IllegalStateException("Cannot build for a bucket which has not been collected [" + bucket + "]");
}
return in.buildAggregation(rebasedBucket);
}
};
}
private static class Entry {
final LeafReaderContext context;
final PackedLongValues docDeltas;
final PackedLongValues buckets;
Entry(LeafReaderContext context, PackedLongValues docDeltas, PackedLongValues buckets) {
this.context = context;
this.docDeltas = docDeltas;
this.buckets = buckets;
}
}
}
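Editor's note: the collector above stores per-leaf doc ids as deltas (doc - lastDoc) and re-accumulates them during replay (doc += delta). A tiny stand-alone sketch of that delta encoding, using plain arrays instead of PackedLongValues:

public class DocDeltaSketch {
    public static void main(String[] args) {
        int[] docs = {3, 7, 12};                 // doc ids collected in one segment, in order
        int[] deltas = new int[docs.length];
        int lastDoc = 0;
        for (int i = 0; i < docs.length; i++) {  // encode, as in collect(doc, bucket) above
            deltas[i] = docs[i] - lastDoc;
            lastDoc = docs[i];
        }
        // deltas is now [3, 4, 5]
        int doc = 0;
        for (int delta : deltas) {               // decode, as in the replay loop of prepareSelectedBuckets
            doc += delta;
            System.out.println(doc);             // prints 3, 7, 12
        }
    }
}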

View File

@ -0,0 +1,218 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
public class AutoDateHistogramAggregationBuilder
extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, AutoDateHistogramAggregationBuilder> {
public static final String NAME = "auto_date_histogram";
public static final ParseField NUM_BUCKETS_FIELD = new ParseField("buckets");
private static final ObjectParser<AutoDateHistogramAggregationBuilder, Void> PARSER;
static {
PARSER = new ObjectParser<>(AutoDateHistogramAggregationBuilder.NAME);
ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true);
PARSER.declareInt(AutoDateHistogramAggregationBuilder::setNumBuckets, NUM_BUCKETS_FIELD);
}
public static AutoDateHistogramAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
return PARSER.parse(parser, new AutoDateHistogramAggregationBuilder(aggregationName), null);
}
private int numBuckets = 10;
/** Create a new builder with the given name. */
public AutoDateHistogramAggregationBuilder(String name) {
super(name, ValuesSourceType.NUMERIC, ValueType.DATE);
}
/** Read from a stream, for internal use only. */
public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException {
super(in, ValuesSourceType.NUMERIC, ValueType.DATE);
numBuckets = in.readVInt();
}
protected AutoDateHistogramAggregationBuilder(AutoDateHistogramAggregationBuilder clone, Builder factoriesBuilder,
Map<String, Object> metaData) {
super(clone, factoriesBuilder, metaData);
this.numBuckets = clone.numBuckets;
}
@Override
protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, Object> metaData) {
return new AutoDateHistogramAggregationBuilder(this, factoriesBuilder, metaData);
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
out.writeVInt(numBuckets);
}
@Override
public String getType() {
return NAME;
}
public AutoDateHistogramAggregationBuilder setNumBuckets(int numBuckets) {
if (numBuckets <= 0) {
throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName() + " must be greater than 0 for [" + name + "]");
}
this.numBuckets = numBuckets;
return this;
}
public int getNumBuckets() {
return numBuckets;
}
@Override
protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
RoundingInfo[] roundings = new RoundingInfo[6];
roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE), 1000L, 1, 5, 10, 30);
roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR), 60 * 1000L, 1, 5, 10, 30);
roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY), 60 * 60 * 1000L, 1, 3, 12);
roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH), 24 * 60 * 60 * 1000L, 1, 7);
roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR), 30 * 24 * 60 * 60 * 1000L, 1, 3);
roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY), 365 * 24 * 60 * 60 * 1000L, 1, 5, 10, 20, 50, 100);
int maxRoundingInterval = Arrays.stream(roundings, 0, roundings.length - 1)
.map(rounding -> rounding.innerIntervals)
.flatMapToInt(Arrays::stream)
.boxed()
.reduce(Integer::max).get();
Settings settings = context.getQueryShardContext().getIndexSettings().getNodeSettings();
int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings);
int bucketCeiling = maxBuckets / maxRoundingInterval;
if (numBuckets > bucketCeiling) {
throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName() +
" must be less than " + bucketCeiling);
}
return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData);
}
private Rounding createRounding(DateTimeUnit interval) {
Rounding.Builder tzRoundingBuilder = Rounding.builder(interval);
if (timeZone() != null) {
tzRoundingBuilder.timeZone(timeZone());
}
Rounding rounding = tzRoundingBuilder.build();
return rounding;
}
@Override
protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.field(NUM_BUCKETS_FIELD.getPreferredName(), numBuckets);
return builder;
}
@Override
protected int innerHashCode() {
return Objects.hash(numBuckets);
}
@Override
protected boolean innerEquals(Object obj) {
AutoDateHistogramAggregationBuilder other = (AutoDateHistogramAggregationBuilder) obj;
return Objects.equals(numBuckets, other.numBuckets);
}
public static class RoundingInfo implements Writeable {
final Rounding rounding;
final int[] innerIntervals;
final long roughEstimateDurationMillis;
public RoundingInfo(Rounding rounding, long roughEstimateDurationMillis, int... innerIntervals) {
this.rounding = rounding;
this.roughEstimateDurationMillis = roughEstimateDurationMillis;
this.innerIntervals = innerIntervals;
}
public RoundingInfo(StreamInput in) throws IOException {
rounding = Rounding.Streams.read(in);
roughEstimateDurationMillis = in.readVLong();
innerIntervals = in.readIntArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Rounding.Streams.write(rounding, out);
out.writeVLong(roughEstimateDurationMillis);
out.writeIntArray(innerIntervals);
}
public int getMaximumInnerInterval() {
return innerIntervals[innerIntervals.length - 1];
}
public long getRoughEstimateDurationMillis() {
return roughEstimateDurationMillis;
}
@Override
public int hashCode() {
return Objects.hash(rounding, Arrays.hashCode(innerIntervals));
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
RoundingInfo other = (RoundingInfo) obj;
return Objects.equals(rounding, other.rounding) &&
Objects.deepEquals(innerIntervals, other.innerIntervals);
}
}
}
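Editor's note: a minimal sketch of requesting the new aggregation from client code; it mirrors the {"auto_date_histogram": {"field": ..., "buckets": N}} shape the parser above accepts. "timestamp" is a hypothetical date field, and the Elasticsearch server classes are assumed to be on the classpath:

import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class AutoDateHistogramSketch {
    public static void main(String[] args) {
        // Ask for roughly 20 buckets over a "timestamp" date field; the aggregator
        // picks the rounding (seconds .. years) that best fits the data range.
        AutoDateHistogramAggregationBuilder agg = new AutoDateHistogramAggregationBuilder("over_time");
        agg.field("timestamp");
        agg.setNumBuckets(20);

        SearchSourceBuilder source = new SearchSourceBuilder().aggregation(agg);
        System.out.println(source); // prints the JSON body of the request
    }
}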

View File

@ -0,0 +1,199 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator;
import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
import org.elasticsearch.search.aggregations.bucket.MergingBucketsDeferringCollector;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* An aggregator for date values. Every date is rounded down using a configured
* {@link Rounding}.
*
* @see Rounding
*/
class AutoDateHistogramAggregator extends DeferableBucketAggregator {
private final ValuesSource.Numeric valuesSource;
private final DocValueFormat formatter;
private final RoundingInfo[] roundingInfos;
private int roundingIdx = 0;
private LongHash bucketOrds;
private int targetBuckets;
private MergingBucketsDeferringCollector deferringCollector;
AutoDateHistogramAggregator(String name, AggregatorFactories factories, int numBuckets, RoundingInfo[] roundingInfos,
@Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext aggregationContext, Aggregator parent,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
this.targetBuckets = numBuckets;
this.valuesSource = valuesSource;
this.formatter = formatter;
this.roundingInfos = roundingInfos;
bucketOrds = new LongHash(1, aggregationContext.bigArrays());
}
@Override
public boolean needsScores() {
return (valuesSource != null && valuesSource.needsScores()) || super.needsScores();
}
@Override
protected boolean shouldDefer(Aggregator aggregator) {
return true;
}
@Override
public DeferringBucketCollector getDeferringCollector() {
deferringCollector = new MergingBucketsDeferringCollector(context);
return deferringCollector;
}
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
final LeafBucketCollector sub) throws IOException {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
final SortedNumericDocValues values = valuesSource.longValues(ctx);
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
assert bucket == 0;
if (values.advanceExact(doc)) {
final int valuesCount = values.docValueCount();
long previousRounded = Long.MIN_VALUE;
for (int i = 0; i < valuesCount; ++i) {
long value = values.nextValue();
long rounded = roundingInfos[roundingIdx].rounding.round(value);
assert rounded >= previousRounded;
if (rounded == previousRounded) {
continue;
}
long bucketOrd = bucketOrds.add(rounded);
if (bucketOrd < 0) { // already seen
bucketOrd = -1 - bucketOrd;
collectExistingBucket(sub, doc, bucketOrd);
} else {
collectBucket(sub, doc, bucketOrd);
while (roundingIdx < roundingInfos.length - 1
&& bucketOrds.size() > (targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval())) {
increaseRounding();
}
}
previousRounded = rounded;
}
}
}
private void increaseRounding() {
try (LongHash oldBucketOrds = bucketOrds) {
LongHash newBucketOrds = new LongHash(1, context.bigArrays());
long[] mergeMap = new long[(int) oldBucketOrds.size()];
Rounding newRounding = roundingInfos[++roundingIdx].rounding;
for (int i = 0; i < oldBucketOrds.size(); i++) {
long oldKey = oldBucketOrds.get(i);
long newKey = newRounding.round(oldKey);
long newBucketOrd = newBucketOrds.add(newKey);
if (newBucketOrd >= 0) {
mergeMap[i] = newBucketOrd;
} else {
mergeMap[i] = -1 - newBucketOrd;
}
}
mergeBuckets(mergeMap, newBucketOrds.size());
if (deferringCollector != null) {
deferringCollector.mergeBuckets(mergeMap);
}
bucketOrds = newBucketOrds;
}
}
};
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
assert owningBucketOrdinal == 0;
consumeBucketsAndMaybeBreak((int) bucketOrds.size());
long[] bucketOrdArray = new long[(int) bucketOrds.size()];
for (int i = 0; i < bucketOrds.size(); i++) {
bucketOrdArray[i] = i;
}
runDeferredCollections(bucketOrdArray);
List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
for (long i = 0; i < bucketOrds.size(); i++) {
buckets.add(new InternalAutoDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), formatter, bucketAggregations(i)));
}
// the contract of the histogram aggregation is that shards must return
// buckets ordered by key in ascending order
CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this));
// value source will be null for unmapped fields
InternalAutoDateHistogram.BucketInfo emptyBucketInfo = new InternalAutoDateHistogram.BucketInfo(roundingInfos, roundingIdx,
buildEmptySubAggregations());
return new InternalAutoDateHistogram(name, buckets, targetBuckets, emptyBucketInfo, formatter, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildEmptyAggregation() {
InternalAutoDateHistogram.BucketInfo emptyBucketInfo = new InternalAutoDateHistogram.BucketInfo(roundingInfos, roundingIdx,
buildEmptySubAggregations());
return new InternalAutoDateHistogram(name, Collections.emptyList(), targetBuckets, emptyBucketInfo, formatter,
pipelineAggregators(), metaData());
}
@Override
public void doClose() {
Releasables.close(bucketOrds);
}
}
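Editor's note: a small worked example of when increaseRounding() above kicks in. The collector promotes to the next rounding once the number of distinct keys exceeds targetBuckets times the current rounding's maximum inner interval (for the seconds rounding defined in AutoDateHistogramAggregationBuilder, the inner intervals are 1, 5, 10, 30, so the maximum is 30):

public class RoundingEscalationSketch {
    public static void main(String[] args) {
        int targetBuckets = 10;               // "buckets" requested in the aggregation
        int maxInnerIntervalSeconds = 30;     // seconds rounding allows intervals 1, 5, 10, 30
        long distinctSecondKeys = 350;        // distinct second-level keys seen so far

        // Mirrors: bucketOrds.size() > targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval()
        boolean promote = distinctSecondKeys > (long) targetBuckets * maxInnerIntervalSeconds;
        System.out.println("promote from seconds to minutes: " + promote); // true, since 350 > 300
    }
}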

View File

@ -0,0 +1,72 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public final class AutoDateHistogramAggregatorFactory
extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, AutoDateHistogramAggregatorFactory> {
private final int numBuckets;
private RoundingInfo[] roundingInfos;
public AutoDateHistogramAggregatorFactory(String name, ValuesSourceConfig<Numeric> config, int numBuckets, RoundingInfo[] roundingInfos,
SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
Map<String, Object> metaData) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metaData);
this.numBuckets = numBuckets;
this.roundingInfos = roundingInfos;
}
@Override
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
if (collectsFromSingleBucket == false) {
return asMultiBucketAggregator(this, context, parent);
}
return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
}
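// Descriptive note (assumption, based on the owningBucketOrdinal == 0 assert
// in AutoDateHistogramAggregator#buildAggregation): the aggregator only
// collects from a single owning bucket, so when it is nested under another
// bucketing aggregation it is wrapped via asMultiBucketAggregator, which
// creates a separate aggregator instance per owning bucket.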
private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) throws IOException {
return new AutoDateHistogramAggregator(name, factories, numBuckets, roundingInfos, valuesSource, config.format(), context, parent,
pipelineAggregators,
metaData);
}
@Override
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
return createAggregator(null, parent, pipelineAggregators, metaData);
}
}

View File

@ -28,13 +28,13 @@ import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.internal.SearchContext;

View File

@ -0,0 +1,601 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.KeyComparable;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;
/**
* Implementation of {@link Histogram}.
*/
public final class InternalAutoDateHistogram extends
InternalMultiBucketAggregation<InternalAutoDateHistogram, InternalAutoDateHistogram.Bucket> implements Histogram, HistogramFactory {
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable<Bucket> {
final long key;
final long docCount;
final InternalAggregations aggregations;
protected final transient DocValueFormat format;
public Bucket(long key, long docCount, DocValueFormat format,
InternalAggregations aggregations) {
this.format = format;
this.key = key;
this.docCount = docCount;
this.aggregations = aggregations;
}
/**
* Read from a stream.
*/
public Bucket(StreamInput in, DocValueFormat format) throws IOException {
this.format = format;
key = in.readLong();
docCount = in.readVLong();
aggregations = InternalAggregations.readAggregations(in);
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != InternalAutoDateHistogram.Bucket.class) {
return false;
}
InternalAutoDateHistogram.Bucket that = (InternalAutoDateHistogram.Bucket) obj;
// No need to take the format parameter into account,
// it is already stored and tested on the InternalAutoDateHistogram object
return key == that.key
&& docCount == that.docCount
&& Objects.equals(aggregations, that.aggregations);
}
@Override
public int hashCode() {
return Objects.hash(getClass(), key, docCount, aggregations);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeLong(key);
out.writeVLong(docCount);
aggregations.writeTo(out);
}
@Override
public String getKeyAsString() {
return format.format(key).toString();
}
@Override
public Object getKey() {
return new DateTime(key, DateTimeZone.UTC);
}
@Override
public long getDocCount() {
return docCount;
}
@Override
public Aggregations getAggregations() {
return aggregations;
}
Bucket reduce(List<Bucket> buckets, Rounding rounding, ReduceContext context) {
List<InternalAggregations> aggregations = new ArrayList<>(buckets.size());
long docCount = 0;
for (Bucket bucket : buckets) {
docCount += bucket.docCount;
aggregations.add((InternalAggregations) bucket.getAggregations());
}
InternalAggregations aggs = InternalAggregations.reduce(aggregations, context);
return new InternalAutoDateHistogram.Bucket(rounding.round(key), docCount, format, aggs);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
String keyAsString = format.format(key).toString();
builder.startObject();
if (format != DocValueFormat.RAW) {
builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString);
}
builder.field(CommonFields.KEY.getPreferredName(), key);
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
aggregations.toXContentInternal(builder, params);
builder.endObject();
return builder;
}
@Override
public int compareKey(Bucket other) {
return Long.compare(key, other.key);
}
public DocValueFormat getFormatter() {
return format;
}
}
static class BucketInfo {
final RoundingInfo[] roundingInfos;
final int roundingIdx;
final InternalAggregations emptySubAggregations;
BucketInfo(RoundingInfo[] roundings, int roundingIdx, InternalAggregations subAggregations) {
this.roundingInfos = roundings;
this.roundingIdx = roundingIdx;
this.emptySubAggregations = subAggregations;
}
BucketInfo(StreamInput in) throws IOException {
int size = in.readVInt();
roundingInfos = new RoundingInfo[size];
for (int i = 0; i < size; i++) {
roundingInfos[i] = new RoundingInfo(in);
}
roundingIdx = in.readVInt();
emptySubAggregations = InternalAggregations.readAggregations(in);
}
void writeTo(StreamOutput out) throws IOException {
out.writeVInt(roundingInfos.length);
for (RoundingInfo roundingInfo : roundingInfos) {
roundingInfo.writeTo(out);
}
out.writeVInt(roundingIdx);
emptySubAggregations.writeTo(out);
}
@Override
public boolean equals(Object obj) {
if (obj == null || getClass() != obj.getClass()) {
return false;
}
BucketInfo that = (BucketInfo) obj;
return Objects.deepEquals(roundingInfos, that.roundingInfos)
&& Objects.equals(roundingIdx, that.roundingIdx)
&& Objects.equals(emptySubAggregations, that.emptySubAggregations);
}
@Override
public int hashCode() {
return Objects.hash(getClass(), Arrays.hashCode(roundingInfos), roundingIdx, emptySubAggregations);
}
}
private final List<Bucket> buckets;
private final DocValueFormat format;
private final BucketInfo bucketInfo;
private final int targetBuckets;
InternalAutoDateHistogram(String name, List<Bucket> buckets, int targetBuckets, BucketInfo emptyBucketInfo, DocValueFormat formatter,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
super(name, pipelineAggregators, metaData);
this.buckets = buckets;
this.bucketInfo = emptyBucketInfo;
this.format = formatter;
this.targetBuckets = targetBuckets;
}
/**
* Read from a stream.
*/
public InternalAutoDateHistogram(StreamInput in) throws IOException {
super(in);
bucketInfo = new BucketInfo(in);
format = in.readNamedWriteable(DocValueFormat.class);
buckets = in.readList(stream -> new Bucket(stream, format));
this.targetBuckets = in.readVInt();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
bucketInfo.writeTo(out);
out.writeNamedWriteable(format);
out.writeList(buckets);
out.writeVInt(targetBuckets);
}
@Override
public String getWriteableName() {
return AutoDateHistogramAggregationBuilder.NAME;
}
@Override
public List<InternalAutoDateHistogram.Bucket> getBuckets() {
return Collections.unmodifiableList(buckets);
}
DocValueFormat getFormatter() {
return format;
}
public int getTargetBuckets() {
return targetBuckets;
}
public BucketInfo getBucketInfo() {
return bucketInfo;
}
@Override
public InternalAutoDateHistogram create(List<Bucket> buckets) {
return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators(), metaData);
}
@Override
public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations);
}
private static class IteratorAndCurrent {
private final Iterator<Bucket> iterator;
private Bucket current;
IteratorAndCurrent(Iterator<Bucket> iterator) {
this.iterator = iterator;
current = iterator.next();
}
}
/**
* This method works almost exactly the same as
* InternalDateHistogram#reduceBuckets(List, ReduceContext); the difference
* here is that we need to round all the keys we see using the highest-level
* rounding returned across all the shards, so that the resolution of the
* buckets is the same and they can be reduced together.
*/
private BucketReduceResult reduceBuckets(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
// First we need to find the highest level rounding used across all the
// shards
int reduceRoundingIdx = 0;
for (InternalAggregation aggregation : aggregations) {
int aggRoundingIdx = ((InternalAutoDateHistogram) aggregation).bucketInfo.roundingIdx;
if (aggRoundingIdx > reduceRoundingIdx) {
reduceRoundingIdx = aggRoundingIdx;
}
}
// This rounding will be used to reduce all the buckets
RoundingInfo reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx];
Rounding reduceRounding = reduceRoundingInfo.rounding;
final PriorityQueue<IteratorAndCurrent> pq = new PriorityQueue<IteratorAndCurrent>(aggregations.size()) {
@Override
protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
return a.current.key < b.current.key;
}
};
for (InternalAggregation aggregation : aggregations) {
InternalAutoDateHistogram histogram = (InternalAutoDateHistogram) aggregation;
if (histogram.buckets.isEmpty() == false) {
pq.add(new IteratorAndCurrent(histogram.buckets.iterator()));
}
}
List<Bucket> reducedBuckets = new ArrayList<>();
if (pq.size() > 0) {
// list of buckets coming from different shards that have the same key
List<Bucket> currentBuckets = new ArrayList<>();
double key = reduceRounding.round(pq.top().current.key);
do {
final IteratorAndCurrent top = pq.top();
if (reduceRounding.round(top.current.key) != key) {
// the key changes, reduce what we already buffered and reset the buffer for current buckets
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext);
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
currentBuckets.clear();
key = reduceRounding.round(top.current.key);
}
currentBuckets.add(top.current);
if (top.iterator.hasNext()) {
final Bucket next = top.iterator.next();
assert next.key > top.current.key : "shards must return data sorted by key";
top.current = next;
pq.updateTop();
} else {
pq.pop();
}
} while (pq.size() > 0);
if (currentBuckets.isEmpty() == false) {
final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext);
reduceContext.consumeBucketsAndMaybeBreak(1);
reducedBuckets.add(reduced);
}
}
return mergeBucketsIfNeeded(reducedBuckets, reduceRoundingIdx, reduceRoundingInfo, reduceContext);
}
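// Illustrative sketch (hypothetical shard responses): if shard A reduced at
// roundingIdx 1 (minutes) and shard B at roundingIdx 2 (hours),
// reduceRoundingIdx becomes 2 and shard A's minute-keyed buckets are
// re-rounded to hours before the priority-queue merge, so keys such as
// 10:05 and 10:20 from shard A and 10:00 from shard B all land in a single
// 10:00 bucket whose doc counts are summed by Bucket#reduce.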
private BucketReduceResult mergeBucketsIfNeeded(List<Bucket> reducedBuckets, int reduceRoundingIdx, RoundingInfo reduceRoundingInfo,
ReduceContext reduceContext) {
while (reducedBuckets.size() > (targetBuckets * reduceRoundingInfo.getMaximumInnerInterval())
&& reduceRoundingIdx < bucketInfo.roundingInfos.length - 1) {
reduceRoundingIdx++;
reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx];
reducedBuckets = mergeBuckets(reducedBuckets, reduceRoundingInfo.rounding, reduceContext);
}
return new BucketReduceResult(reducedBuckets, reduceRoundingInfo, reduceRoundingIdx);
}
private List<Bucket> mergeBuckets(List<Bucket> reducedBuckets, Rounding reduceRounding, ReduceContext reduceContext) {
List<Bucket> mergedBuckets = new ArrayList<>();
List<Bucket> sameKeyedBuckets = new ArrayList<>();
double key = Double.NaN;
for (Bucket bucket : reducedBuckets) {
long roundedBucketKey = reduceRounding.round(bucket.key);
if (Double.isNaN(key)) {
key = roundedBucketKey;
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1);
sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations));
} else if (roundedBucketKey == key) {
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1);
sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations));
} else {
reduceContext.consumeBucketsAndMaybeBreak(1);
mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext));
sameKeyedBuckets.clear();
key = roundedBucketKey;
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1);
sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations));
}
}
if (sameKeyedBuckets.isEmpty() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext));
}
reducedBuckets = mergedBuckets;
return reducedBuckets;
}
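// Note on the circuit-breaker accounting in mergeBuckets above: each
// buffered bucket releases what it previously consumed (the negative
// consumeBucketsAndMaybeBreak calls also return its inner buckets), and a
// single +1 is consumed whenever the buffered buckets are reduced into one
// merged bucket.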
private static class BucketReduceResult {
List<Bucket> buckets;
RoundingInfo roundingInfo;
int roundingIdx;
BucketReduceResult(List<Bucket> buckets, RoundingInfo roundingInfo, int roundingIdx) {
this.buckets = buckets;
this.roundingInfo = roundingInfo;
this.roundingIdx = roundingIdx;
}
}
private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, ReduceContext reduceContext) {
List<Bucket> list = currentResult.buckets;
if (list.isEmpty()) {
return currentResult;
}
int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx,
bucketInfo.roundingInfos);
RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx];
Rounding rounding = roundingInfo.rounding;
// merge buckets using the new rounding
list = mergeBuckets(list, rounding, reduceContext);
Bucket lastBucket = null;
ListIterator<Bucket> iter = list.listIterator();
InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(bucketInfo.emptySubAggregations),
reduceContext);
// Add the empty buckets within the data,
// e.g. if the data series is [1,2,3,7] there are 3 empty buckets that will be created for 4, 5 and 6
while (iter.hasNext()) {
Bucket nextBucket = list.get(iter.nextIndex());
if (lastBucket != null) {
long key = rounding.nextRoundingValue(lastBucket.key);
while (key < nextBucket.key) {
reduceContext.consumeBucketsAndMaybeBreak(1);
iter.add(new InternalAutoDateHistogram.Bucket(key, 0, format, reducedEmptySubAggs));
key = rounding.nextRoundingValue(key);
}
assert key == nextBucket.key : "key: " + key + ", nextBucket.key: " + nextBucket.key;
}
lastBucket = iter.next();
}
return new BucketReduceResult(list, roundingInfo, roundingIdx);
}
private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, RoundingInfo[] roundings) {
if (roundingIdx == roundings.length - 1) {
return roundingIdx;
}
int currentRoundingIdx = roundingIdx;
// Getting the accurate number of required buckets can be slow for large
// ranges at low roundings, so first get a rough estimate of the rounding
// so that we are at most 1 away from the correct rounding, and then
// compute the accurate rounding value
for (int i = currentRoundingIdx + 1; i < roundings.length; i++) {
long dataDuration = maxKey - minKey;
long roughEstimateRequiredBuckets = dataDuration / roundings[i].getRoughEstimateDurationMillis();
if (roughEstimateRequiredBuckets < targetBuckets * roundings[i].getMaximumInnerInterval()) {
currentRoundingIdx = i - 1;
break;
} else if (i == roundingIdx - 1) {
currentRoundingIdx = i;
break;
}
}
int requiredBuckets = 0;
do {
Rounding currentRounding = roundings[currentRoundingIdx].rounding;
long currentKey = minKey;
requiredBuckets = 0;
while (currentKey < maxKey) {
requiredBuckets++;
currentKey = currentRounding.nextRoundingValue(currentKey);
}
currentRoundingIdx++;
} while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval())
&& currentRoundingIdx < roundings.length);
// The loop increments past the correct rounding index, so we need to
// subtract one to get the rounding index we need
return currentRoundingIdx - 1;
}
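// Illustrative sketch (hypothetical numbers): for data spanning 10 days with
// targetBuckets = 10, the rough pass divides 864,000,000 ms by each
// rounding's rough duration estimate; an hour rounding yields ~240 buckets
// (at or above targetBuckets * its maximum inner interval) while a day
// rounding yields ~10 (below its threshold), so the rough pass stops one
// index before the day rounding and the do/while loop then counts the exact
// buckets to settle on the final rounding index.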
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
BucketReduceResult reducedBucketsResult = reduceBuckets(aggregations, reduceContext);
if (reduceContext.isFinalReduce()) {
// adding empty buckets if needed
reducedBucketsResult = addEmptyBuckets(reducedBucketsResult, reduceContext);
// Adding empty buckets may have tipped us over the target so merge the buckets again if needed
reducedBucketsResult = mergeBucketsIfNeeded(reducedBucketsResult.buckets, reducedBucketsResult.roundingIdx,
reducedBucketsResult.roundingInfo, reduceContext);
// Now finally see if we need to merge consecutive buckets together to make a coarser interval at the same rounding
reducedBucketsResult = maybeMergeConsecutiveBuckets(reducedBucketsResult, reduceContext);
}
BucketInfo bucketInfo = new BucketInfo(this.bucketInfo.roundingInfos, reducedBucketsResult.roundingIdx,
this.bucketInfo.emptySubAggregations);
return new InternalAutoDateHistogram(getName(), reducedBucketsResult.buckets, targetBuckets, bucketInfo, format,
pipelineAggregators(), getMetaData());
}
private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reducedBucketsResult, ReduceContext reduceContext) {
List<Bucket> buckets = reducedBucketsResult.buckets;
RoundingInfo roundingInfo = reducedBucketsResult.roundingInfo;
int roundingIdx = reducedBucketsResult.roundingIdx;
if (buckets.size() > targetBuckets) {
for (int interval : roundingInfo.innerIntervals) {
int resultingBuckets = buckets.size() / interval;
if (resultingBuckets <= targetBuckets) {
return mergeConsecutiveBuckets(buckets, interval, roundingIdx, roundingInfo, reduceContext);
}
}
}
return reducedBucketsResult;
}
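// Illustrative sketch (hypothetical numbers): with 45 reduced buckets, a
// target of 10 and innerIntervals of {1, 3, 12} for the current rounding,
// intervals 1 and 3 leave 45 and 15 buckets (still above the target) while
// interval 12 leaves 45 / 12 = 3, so every 12 consecutive buckets are merged
// into one coarser bucket at the same rounding.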
private BucketReduceResult mergeConsecutiveBuckets(List<Bucket> reducedBuckets, int mergeInterval, int roundingIdx,
RoundingInfo roundingInfo, ReduceContext reduceContext) {
List<Bucket> mergedBuckets = new ArrayList<>();
List<Bucket> sameKeyedBuckets = new ArrayList<>();
double key = roundingInfo.rounding.round(reducedBuckets.get(0).key);
for (int i = 0; i < reducedBuckets.size(); i++) {
Bucket bucket = reducedBuckets.get(i);
if (i % mergeInterval == 0 && sameKeyedBuckets.isEmpty() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext));
sameKeyedBuckets.clear();
key = roundingInfo.rounding.round(bucket.key);
}
reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1);
sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations));
}
if (sameKeyedBuckets.isEmpty() == false) {
reduceContext.consumeBucketsAndMaybeBreak(1);
mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext));
}
return new BucketReduceResult(mergedBuckets, roundingInfo, roundingIdx);
}
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.startArray(CommonFields.BUCKETS.getPreferredName());
for (Bucket bucket : buckets) {
bucket.toXContent(builder, params);
}
builder.endArray();
return builder;
}
// HistogramFactory method impls
@Override
public Number getKey(MultiBucketsAggregation.Bucket bucket) {
return ((Bucket) bucket).key;
}
@Override
public Number nextKey(Number key) {
return bucketInfo.roundingInfos[bucketInfo.roundingIdx].rounding.nextRoundingValue(key.longValue());
}
@Override
public InternalAggregation createAggregation(List<MultiBucketsAggregation.Bucket> buckets) {
// convert buckets to the right type
List<Bucket> buckets2 = new ArrayList<>(buckets.size());
for (Object b : buckets) {
buckets2.add((Bucket) b);
}
buckets2 = Collections.unmodifiableList(buckets2);
return new InternalAutoDateHistogram(name, buckets2, targetBuckets, bucketInfo, format, pipelineAggregators(), getMetaData());
}
@Override
public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) {
return new Bucket(key.longValue(), docCount, format, aggregations);
}
@Override
protected boolean doEquals(Object obj) {
InternalAutoDateHistogram that = (InternalAutoDateHistogram) obj;
return Objects.equals(buckets, that.buckets)
&& Objects.equals(format, that.format)
&& Objects.equals(bucketInfo, that.bucketInfo);
}
@Override
protected int doHashCode() {
return Objects.hash(buckets, format, bucketInfo);
}
}

View File

@ -424,7 +424,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = nextKey(key).longValue();
}
assert key == nextBucket.key;
assert key == nextBucket.key : "key: " + key + ", nextBucket.key: " + nextBucket.key;
}
lastBucket = iter.next();
}

View File

@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.List;
public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation<ParsedAutoDateHistogram.ParsedBucket> implements Histogram {
@Override
public String getType() {
return AutoDateHistogramAggregationBuilder.NAME;
}
@Override
public List<? extends Histogram.Bucket> getBuckets() {
return buckets;
}
private static ObjectParser<ParsedAutoDateHistogram, Void> PARSER =
new ObjectParser<>(ParsedAutoDateHistogram.class.getSimpleName(), true, ParsedAutoDateHistogram::new);
static {
declareMultiBucketAggregationFields(PARSER,
parser -> ParsedBucket.fromXContent(parser, false),
parser -> ParsedBucket.fromXContent(parser, true));
}
public static ParsedAutoDateHistogram fromXContent(XContentParser parser, String name) throws IOException {
ParsedAutoDateHistogram aggregation = PARSER.parse(parser, null);
aggregation.setName(name);
return aggregation;
}
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
private Long key;
@Override
public Object getKey() {
if (key != null) {
return new DateTime(key, DateTimeZone.UTC);
}
return null;
}
@Override
public String getKeyAsString() {
String keyAsString = super.getKeyAsString();
if (keyAsString != null) {
return keyAsString;
}
if (key != null) {
return Long.toString(key);
}
return null;
}
@Override
protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
return builder.field(CommonFields.KEY.getPreferredName(), key);
}
static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException {
return parseXContent(parser, keyed, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue());
}
}
}

View File

@ -140,22 +140,6 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
this.shardFailures = shardFailures;
}
private void ignoreVersion(String version) {
// ignore extra field
}
private void ignoreStartTime(String startTime) {
// ignore extra field
}
private void ignoreEndTime(String endTime) {
// ignore extra field
}
private void ignoreDurationInMillis(long durationInMillis) {
// ignore extra field
}
public SnapshotInfo build() {
SnapshotId snapshotId = new SnapshotId(snapshotName, snapshotUUID);
@ -197,10 +181,6 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
int getSuccessfulShards() {
return successfulShards;
}
private void ignoreFailedShards(int failedShards) {
// ignore extra field
}
}
public static final ObjectParser<SnapshotInfoBuilder, Void> SNAPSHOT_INFO_PARSER =
@ -222,14 +202,9 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
SNAPSHOT_INFO_PARSER.declareInt(SnapshotInfoBuilder::setVersion, new ParseField(VERSION_ID));
SNAPSHOT_INFO_PARSER.declareObjectArray(SnapshotInfoBuilder::setShardFailures, SnapshotShardFailure.SNAPSHOT_SHARD_FAILURE_PARSER,
new ParseField(FAILURES));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::ignoreVersion, new ParseField(VERSION));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::ignoreStartTime, new ParseField(START_TIME));
SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::ignoreEndTime, new ParseField(END_TIME));
SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::ignoreDurationInMillis, new ParseField(DURATION_IN_MILLIS));
SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::setTotalShards, new ParseField(TOTAL));
SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::setSuccessfulShards, new ParseField(SUCCESSFUL));
SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::ignoreFailedShards, new ParseField(FAILED));
}
private final SnapshotId snapshotId;

View File

@ -102,8 +102,8 @@ public class CreateSnapshotRequestTests extends ESTestCase {
NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput());
Map<String, Object> map = parser.mapOrdered();
CreateSnapshotRequest processed = new CreateSnapshotRequest((String)map.get("repository"), (String)map.get("snapshot"));
processed.waitForCompletion((boolean)map.getOrDefault("wait_for_completion", false));
processed.masterNodeTimeout((String)map.getOrDefault("master_node_timeout", "30s"));
processed.waitForCompletion(original.waitForCompletion());
processed.masterNodeTimeout(original.masterNodeTimeout());
processed.source(map);
assertEquals(original, processed);

View File

@ -40,7 +40,7 @@ public class CreateSnapshotResponseTests extends AbstractXContentTestCase<Create
@Override
protected boolean supportsUnknownFields() {
return false;
return true;
}
@Override
@ -63,9 +63,7 @@ public class CreateSnapshotResponseTests extends AbstractXContentTestCase<Create
boolean globalState = randomBoolean();
CreateSnapshotResponse response = new CreateSnapshotResponse();
response.setSnapshotInfo(
return new CreateSnapshotResponse(
new SnapshotInfo(snapshotId, indices, startTime, reason, endTime, totalShards, shardFailures, globalState));
return response;
}
}

View File

@ -320,8 +320,5 @@ public class IndicesOptionsTests extends ESTestCase {
}
assertEquals(map.get("ignore_unavailable"), options.contains(Option.IGNORE_UNAVAILABLE));
assertEquals(map.get("allow_no_indices"), options.contains(Option.ALLOW_NO_INDICES));
assertEquals(map.get("forbid_aliases_to_multiple_indices"), options.contains(Option.FORBID_ALIASES_TO_MULTIPLE_INDICES));
assertEquals(map.get("forbid_closed_indices"), options.contains(Option.FORBID_CLOSED_INDICES));
assertEquals(map.get("ignore_aliases"), options.contains(Option.IGNORE_ALIASES));
}
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.settings;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.IndexingMemoryController;
@ -57,8 +58,15 @@ public class MemorySizeSettingsTests extends ESTestCase {
}
public void testCircuitBreakerSettings() {
// default is chosen based on actual heap size
double defaultTotalPercentage;
if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < new ByteSizeValue(1, ByteSizeUnit.GB).getBytes()) {
defaultTotalPercentage = 0.95d;
} else {
defaultTotalPercentage = 0.7d;
}
assertMemorySizeSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.total.limit",
new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.7)));
new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * defaultTotalPercentage)));
assertMemorySizeSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.fielddata.limit",
new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)));
assertMemorySizeSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.request.limit",

View File

@ -32,6 +32,7 @@ import org.elasticsearch.test.ESTestCase;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.Matchers.containsString;
@ -56,7 +57,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
}
@Override
public void checkParentLimit(String label) throws CircuitBreakingException {
public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
// never trip
}
};
@ -114,7 +115,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
}
@Override
public void checkParentLimit(String label) throws CircuitBreakingException {
public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
// Parent will trip right before regular breaker would trip
if (getBreaker(CircuitBreaker.REQUEST).getUsed() > parentLimit) {
parentTripped.incrementAndGet();
@ -170,6 +171,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
*/
public void testBorrowingSiblingBreakerMemory() throws Exception {
Settings clusterSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false)
.put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200mb")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb")
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb")
@ -199,4 +201,50 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]"));
}
}
public void testParentBreaksOnRealMemoryUsage() throws Exception {
Settings clusterSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), Boolean.TRUE)
.put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200b")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "300b")
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 2)
.build();
AtomicLong memoryUsage = new AtomicLong();
final CircuitBreakerService service = new HierarchyCircuitBreakerService(clusterSettings,
new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) {
@Override
long currentMemoryUsage() {
return memoryUsage.get();
}
};
final CircuitBreaker requestBreaker = service.getBreaker(CircuitBreaker.REQUEST);
// anything below 100 bytes should work (overhead) - current memory usage is zero
requestBreaker.addEstimateBytesAndMaybeBreak(randomLongBetween(0, 99), "request");
assertEquals(0, requestBreaker.getTrippedCount());
// assume memory usage has increased to 150 bytes
memoryUsage.set(150);
// a reservation that bumps memory usage to less than 200 (150 bytes used + reservation < 200)
requestBreaker.addEstimateBytesAndMaybeBreak(randomLongBetween(0, 24), "request");
assertEquals(0, requestBreaker.getTrippedCount());
memoryUsage.set(181);
long reservationInBytes = randomLongBetween(10, 50);
// any reservation of >= 20 bytes (10 bytes * 2 overhead) breaks the parent, but it must be low enough to avoid
// breaking the child breaker.
CircuitBreakingException exception = expectThrows(CircuitBreakingException.class, () -> requestBreaker
.addEstimateBytesAndMaybeBreak(reservationInBytes, "request"));
// it was the parent that rejected the reservation
assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [request] would be"));
assertThat(exception.getMessage(), containsString("which is larger than the limit of [200/200b]"));
assertEquals(0, requestBreaker.getTrippedCount());
assertEquals(1, service.stats().getStats(CircuitBreaker.PARENT).getTrippedCount());
// lower memory usage again - the same reservation should succeed
memoryUsage.set(100);
requestBreaker.addEstimateBytesAndMaybeBreak(reservationInBytes, "request");
assertEquals(0, requestBreaker.getTrippedCount());
}
}

View File

@ -406,7 +406,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
Collections.emptySet());
final ClusterService clusterService = mock(ClusterService.class);
final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService,
transportService, null);
transportService, null, threadPool);
final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool,
transportService, null, clusterService);
final ShardStateAction shardStateAction = mock(ShardStateAction.class);

View File

@ -173,10 +173,17 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {
}
/** Create a {@link Repository} with a random name **/
private Repository createRepository() throws IOException {
private Repository createRepository() {
Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build();
RepositoryMetaData repositoryMetaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings);
return new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry());
final FsRepository repository = new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo manually
}
};
repository.start();
return repository;
}
/** Create a {@link Environment} with random path.home and path.repo **/

View File

@ -24,10 +24,16 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes
import org.elasticsearch.client.Client;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase;
@ -37,18 +43,42 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static org.elasticsearch.repositories.RepositoryDataTests.generateRandomRepoData;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
/**
* Tests for the {@link BlobStoreRepository} and its subclasses.
*/
public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
static final String REPO_TYPE = "fsLike";
protected Collection<Class<? extends Plugin>> getPlugins() {
return Arrays.asList(FsLikeRepoPlugin.class);
}
// the reason for this plug-in is to drop the assertSnapshotOrGenericThread check, as almost all access in this test happens on test threads
public static class FsLikeRepoPlugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin {
@Override
public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
return Collections.singletonMap(REPO_TYPE,
(metadata) -> new FsRepository(metadata, env, namedXContentRegistry) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we access blobStore on test/main threads
}
});
}
}
public void testRetrieveSnapshots() throws Exception {
final Client client = client();
final Path location = ESIntegTestCase.randomRepoPath(node().settings());
@ -57,7 +87,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
logger.info("--> creating repository");
PutRepositoryResponse putRepositoryResponse =
client.admin().cluster().preparePutRepository(repositoryName)
.setType("fs")
.setType(REPO_TYPE)
.setSettings(Settings.builder().put(node().settings()).put("location", location))
.get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
@ -209,7 +239,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
PutRepositoryResponse putRepositoryResponse =
client.admin().cluster().preparePutRepository(repositoryName)
.setType("fs")
.setType(REPO_TYPE)
.setSettings(Settings.builder().put(node().settings()).put("location", location))
.get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
@ -217,6 +247,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
@SuppressWarnings("unchecked") final BlobStoreRepository repository =
(BlobStoreRepository) repositoriesService.repository(repositoryName);
assertThat("getBlobContainer has to be lazy initialized", repository.getBlobContainer(), nullValue());
return repository;
}

View File

@ -16,22 +16,29 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.snapshots;
package org.elasticsearch.repositories.fs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.instanceOf;
public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase {
@Override
protected void createTestRepository(String name) {
protected void createTestRepository(String name, boolean verify) {
assertAcked(client().admin().cluster().preparePutRepository(name)
.setVerify(verify)
.setType("fs").setSettings(Settings.builder()
.put("location", randomRepoPath())
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
}
@Override
protected void afterCreationCheck(Repository repository) {
assertThat(repository, instanceOf(FsRepository.class));
}
}

View File

@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilterTests;
import org.elasticsearch.search.aggregations.bucket.filter.InternalFiltersTests;
import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGridTests;
import org.elasticsearch.search.aggregations.bucket.global.InternalGlobalTests;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogramTests;
import org.elasticsearch.search.aggregations.bucket.missing.InternalMissingTests;
@ -125,6 +126,7 @@ public class AggregationsTests extends ESTestCase {
aggsTests.add(new InternalGeoCentroidTests());
aggsTests.add(new InternalHistogramTests());
aggsTests.add(new InternalDateHistogramTests());
aggsTests.add(new InternalAutoDateHistogramTests());
aggsTests.add(new LongTermsTests());
aggsTests.add(new DoubleTermsTests());
aggsTests.add(new StringTermsTests());

View File

@ -0,0 +1,44 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
public class AutoDateHistogramTests extends BaseAggregationTestCase<AutoDateHistogramAggregationBuilder> {
@Override
protected AutoDateHistogramAggregationBuilder createTestAggregatorBuilder() {
AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder(randomAlphaOfLengthBetween(1, 10));
builder.field(INT_FIELD_NAME);
builder.setNumBuckets(randomIntBetween(1, 100000));
if (randomBoolean()) {
builder.format("###.##");
}
if (randomBoolean()) {
builder.missing(randomIntBetween(0, 10));
}
if (randomBoolean()) {
builder.timeZone(randomDateTimeZone());
}
return builder;
}
}

View File

@ -0,0 +1,154 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram.BucketInfo;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
import org.joda.time.DateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase<InternalAutoDateHistogram> {
private DocValueFormat format;
private RoundingInfo[] roundingInfos;
@Override
public void setUp() throws Exception {
super.setUp();
format = randomNumericDocValueFormat();
roundingInfos = new RoundingInfo[6];
roundingInfos[0] = new RoundingInfo(Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(), 1, 5, 10, 30);
roundingInfos[1] = new RoundingInfo(Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(), 1, 5, 10, 30);
roundingInfos[2] = new RoundingInfo(Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(), 1, 3, 12);
roundingInfos[3] = new RoundingInfo(Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(), 1, 7);
roundingInfos[4] = new RoundingInfo(Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(), 1, 3);
roundingInfos[5] = new RoundingInfo(Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(), 1, 10, 20, 50, 100);
}
@Override
protected InternalAutoDateHistogram createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData,
InternalAggregations aggregations) {
int nbBuckets = randomNumberOfBuckets();
int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
long startingDate = System.currentTimeMillis();
long interval = randomIntBetween(1, 3);
long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
for (int i = 0; i < nbBuckets; i++) {
long key = startingDate + (intervalMillis * i);
buckets.add(i, new InternalAutoDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations));
}
InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);
return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
}
@Override
protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAutoDateHistogram> inputs) {
int roundingIdx = 0;
for (InternalAutoDateHistogram histogram : inputs) {
if (histogram.getBucketInfo().roundingIdx > roundingIdx) {
roundingIdx = histogram.getBucketInfo().roundingIdx;
}
}
Map<Long, Long> expectedCounts = new TreeMap<>();
for (Histogram histogram : inputs) {
for (Histogram.Bucket bucket : histogram.getBuckets()) {
expectedCounts.compute(roundingInfos[roundingIdx].rounding.round(((DateTime) bucket.getKey()).getMillis()),
(key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
}
}
Map<Long, Long> actualCounts = new TreeMap<>();
for (Histogram.Bucket bucket : reduced.getBuckets()) {
actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
(key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
}
assertEquals(expectedCounts, actualCounts);
}
@Override
protected Writeable.Reader<InternalAutoDateHistogram> instanceReader() {
return InternalAutoDateHistogram::new;
}
@Override
protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
return ParsedAutoDateHistogram.class;
}
@Override
protected InternalAutoDateHistogram mutateInstance(InternalAutoDateHistogram instance) {
String name = instance.getName();
List<InternalAutoDateHistogram.Bucket> buckets = instance.getBuckets();
int targetBuckets = instance.getTargetBuckets();
BucketInfo bucketInfo = instance.getBucketInfo();
List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
Map<String, Object> metaData = instance.getMetaData();
switch (between(0, 3)) {
case 0:
name += randomAlphaOfLength(5);
break;
case 1:
buckets = new ArrayList<>(buckets);
buckets.add(new InternalAutoDateHistogram.Bucket(randomNonNegativeLong(), randomIntBetween(1, 100), format,
InternalAggregations.EMPTY));
break;
case 2:
int roundingIdx = bucketInfo.roundingIdx == bucketInfo.roundingInfos.length - 1 ? 0 : bucketInfo.roundingIdx + 1;
bucketInfo = new BucketInfo(bucketInfo.roundingInfos, roundingIdx, bucketInfo.emptySubAggregations);
break;
case 3:
if (metaData == null) {
metaData = new HashMap<>(1);
} else {
metaData = new HashMap<>(instance.getMetaData());
}
metaData.put(randomAlphaOfLength(15), randomInt());
break;
default:
throw new AssertionError("Illegal randomisation branch");
}
return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.snapshots;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
@ -93,6 +94,7 @@ import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.StoredScriptsIT;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.nio.channels.SeekableByteChannel;
@ -1263,7 +1265,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
Repository repository = service.repository("test-repo");
final Map<String, IndexId> indexIds = repository.getRepositoryData().getIndices();
final Map<String, IndexId> indexIds = getRepositoryData(repository).getIndices();
final Path indicesPath = repo.resolve("indices");
logger.info("--> delete index metadata and shard metadata");
@ -1740,6 +1742,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
logger.info("--> trying to create a repository with different name");
assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
.setVerify(false) // do not do verification itself as snapshot threads could be fully blocked
.setType("fs").setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))));
logger.info("--> unblocking blocked node");
@ -2572,7 +2575,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
logger.info("--> emulate an orphan snapshot");
RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
final RepositoryData repositoryData = repositoriesService.repository(repositoryName).getRepositoryData();
final RepositoryData repositoryData = getRepositoryData(repositoriesService.repository(repositoryName));
final IndexId indexId = repositoryData.resolveIndexId(idxName);
clusterService.submitStateUpdateTask("orphan snapshot test", new ClusterStateUpdateTask() {
@ -2793,7 +2796,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
Repository repository = service.repository("test-repo");
final Map<String, IndexId> indexIds = repository.getRepositoryData().getIndices();
final RepositoryData repositoryData = getRepositoryData(repository);
final Map<String, IndexId> indexIds = repositoryData.getIndices();
assertThat(indexIds.size(), equalTo(nbIndices));
// Choose a random index from the snapshot
@ -3454,6 +3458,19 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
}
}
private RepositoryData getRepositoryData(Repository repository) throws InterruptedException {
ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName());
final SetOnce<RepositoryData> repositoryData = new SetOnce<>();
final CountDownLatch latch = new CountDownLatch(1);
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
repositoryData.set(repository.getRepositoryData());
latch.countDown();
});
latch.await();
return repositoryData.get();
}
private void verifySnapshotInfo(final GetSnapshotsResponse response, final Map<String, List<String>> indicesPerSnapshot) {
for (SnapshotInfo snapshotInfo : response.getSnapshots()) {
final List<String> expected = snapshotInfo.indices();

View File

@ -92,8 +92,6 @@ public class MockRepository extends FsRepository {
private final long waitAfterUnblock;
private final MockBlobStore mockBlobStore;
private final String randomPrefix;
private volatile boolean blockOnInitialization;
@ -128,7 +126,6 @@ public class MockRepository extends FsRepository {
waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L);
allowAtomicOperations = metadata.settings().getAsBoolean("allow_atomic_operations", true);
logger.info("starting mock repository with random prefix {}", randomPrefix);
mockBlobStore = new MockBlobStore(super.blobStore());
}
@Override
@ -163,8 +160,8 @@ public class MockRepository extends FsRepository {
}
@Override
protected BlobStore blobStore() {
return mockBlobStore;
protected BlobStore createBlobStore() throws Exception {
return new MockBlobStore(super.createBlobStore());
}
public synchronized void unblock() {
@ -195,7 +192,7 @@ public class MockRepository extends FsRepository {
}
private synchronized boolean blockExecution() {
logger.debug("Blocking execution");
logger.debug("[{}] Blocking execution", metadata.name());
boolean wasBlocked = false;
try {
while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization || blockOnWriteIndexFile ||
@ -207,7 +204,7 @@ public class MockRepository extends FsRepository {
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
logger.debug("Unblocking execution");
logger.debug("[{}] Unblocking execution", metadata.name());
return wasBlocked;
}
@ -285,7 +282,7 @@ public class MockRepository extends FsRepository {
}
private void blockExecutionAndMaybeWait(final String blobName) {
logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
logger.info("[{}] blocking I/O operation for file [{}] at path [{}]", metadata.name(), blobName, path());
if (blockExecution() && waitAfterUnblock > 0) {
try {
// Delay operation after unblocking

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.repositories.blobstore;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
@ -27,34 +28,61 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotRestoreException;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
/**
* Basic integration tests for blob-based repository validation.
*/
public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase {
protected abstract void createTestRepository(String name);
protected abstract void createTestRepository(String name, boolean verify);
protected void afterCreationCheck(Repository repository) {
}
protected void createAndCheckTestRepository(String name) {
final boolean verify = randomBoolean();
createTestRepository(name, verify);
final Iterable<RepositoriesService> repositoriesServices =
internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class);
for (RepositoriesService repositoriesService : repositoriesServices) {
final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(name);
afterCreationCheck(repository);
assertThat("blob store has to be lazy initialized",
repository.getBlobStore(), verify ? is(notNullValue()) : is(nullValue()));
}
}
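As a hedged illustration of the new two-argument hook (the repository type and settings below are assumptions, not taken from this commit), a concrete subclass for the built-in `fs` repository might implement it roughly like this, assuming the usual `ESIntegTestCase` imports:
[source,java]
--------------------------------------------------
// Illustrative only: one possible implementation of the abstract hook for an "fs" repository.
@Override
protected void createTestRepository(String name, boolean verify) {
    assertAcked(client().admin().cluster().preparePutRepository(name)
        .setType("fs")      // repository type assumed for this sketch
        .setVerify(verify)  // verification accesses the blob store, so it is created eagerly
        .setSettings(Settings.builder()
            .put("location", randomRepoPath())
            .put("compress", randomBoolean())));
}
--------------------------------------------------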
public void testSnapshotAndRestore() throws Exception {
final String repoName = randomAsciiName();
logger.info("--> creating repository {}", repoName);
createTestRepository(repoName);
createAndCheckTestRepository(repoName);
int indexCount = randomIntBetween(1, 5);
int[] docCounts = new int[indexCount];
String[] indexNames = generateRandomNames(indexCount);
@ -125,7 +153,7 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase
public void testMultipleSnapshotAndRollback() throws Exception {
String repoName = randomAsciiName();
logger.info("--> creating repository {}", repoName);
createTestRepository(repoName);
createAndCheckTestRepository(repoName);
int iterationCount = randomIntBetween(2, 5);
int[] docCounts = new int[iterationCount];
String indexName = randomAsciiName();
@ -177,12 +205,12 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase
}
}
public void testIndicesDeletedFromRepository() {
public void testIndicesDeletedFromRepository() throws Exception {
Client client = client();
logger.info("--> creating repository");
final String repoName = "test-repo";
createTestRepository(repoName);
createAndCheckTestRepository(repoName);
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
ensureGreen();
@ -219,12 +247,22 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase
logger.info("--> verify index folder deleted from blob container");
RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName());
@SuppressWarnings("unchecked") BlobStoreRepository repository = (BlobStoreRepository) repositoriesSvc.repository(repoName);
BlobContainer indicesBlobContainer = repository.blobStore().blobContainer(repository.basePath().add("indices"));
RepositoryData repositoryData = repository.getRepositoryData();
for (IndexId indexId : repositoryData.getIndices().values()) {
final SetOnce<BlobContainer> indicesBlobContainer = new SetOnce<>();
final SetOnce<RepositoryData> repositoryData = new SetOnce<>();
final CountDownLatch latch = new CountDownLatch(1);
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
indicesBlobContainer.set(repository.blobStore().blobContainer(repository.basePath().add("indices")));
repositoryData.set(repository.getRepositoryData());
latch.countDown();
});
latch.await();
for (IndexId indexId : repositoryData.get().getIndices().values()) {
if (indexId.getName().equals("test-idx-3")) {
assertFalse(indicesBlobContainer.blobExists(indexId.getId())); // deleted index
assertFalse(indicesBlobContainer.get().blobExists(indexId.getId())); // deleted index
}
}
}

View File

@ -87,7 +87,6 @@ import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS;
/**
* Base class for testing {@link Aggregator} implementations.
@ -229,7 +228,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
});
when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class)));
doAnswer(invocation -> {
/* Store the releasables so we can release them at the end of the test case. This is important because aggregations don't
/* Store the release-ables so we can release them at the end of the test case. This is important because aggregations don't
* close their sub-aggregations. This is fairly similar to what the production code does. */
releasables.add((Releasable) invocation.getArguments()[0]);
return null;

View File

@ -53,8 +53,10 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBu
import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedAutoDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram;
import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;
@ -181,6 +183,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c));
map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));

View File

@ -149,7 +149,8 @@ public abstract class InternalMultiBucketAggregationTestCase<T extends InternalA
protected void assertMultiBucketsAggregation(MultiBucketsAggregation expected, MultiBucketsAggregation actual, boolean checkOrder) {
Class<? extends ParsedMultiBucketAggregation> parsedClass = implementationClass();
assertNotNull("Parsed aggregation class must not be null", parsedClass);
assertTrue(parsedClass.isInstance(actual));
assertTrue("Unexpected parsed class, expected instance of: " + actual + ", but was: " + parsedClass,
parsedClass.isInstance(actual));
assertTrue(expected instanceof InternalAggregation);
assertEquals(expected.getName(), actual.getName());

View File

@ -394,6 +394,10 @@ public final class InternalTestCluster extends TestCluster {
builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomNumbers.randomIntBetween(random, 10, 30), TimeUnit.SECONDS));
}
// turning on the real memory circuit breaker leads to spurious test failures. As we have no full control over heap usage, we
// turn it off for these tests.
builder.put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false);
if (random.nextInt(10) == 0) {
builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");
builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");

View File

@ -121,16 +121,15 @@ if a field is useful for aggregating later, and how you might wish to use it (te
=== Grouping Limitations with heterogeneous indices
There is a known limitation to Rollup groups, due to some internal implementation details at this time. The Rollup feature leverages
the `composite` aggregation from Elasticsearch. At the moment, the composite agg only returns buckets when all keys in the tuple are non-null.
Put another way, if you request keys `[A,B,C]` in the composite aggregation, the only documents that are aggregated are those that have
_all_ of the keys `A, B` and `C`.
There was previously a limitation in how Rollup could handle indices that had heterogeneous mappings (multiple, unrelated/non-overlapping
mappings). The recommendation at the time was to configure a separate job per data "type". For example, you might configure a separate
job for each Beats module that you had enabled (one for `process`, another for `filesystem`, etc).
Because Rollup uses the composite agg during the indexing process, it inherits this behavior. Practically speaking, if all of the documents
in your index are homogeneous (they have the same mapping), you can ignore this limitation and stop reading now.
This recommendation was driven by internal implementation details that caused document counts to be potentially incorrect if a single "merged"
job was used.
However, if you have a heterogeneous collection of documents that you wish to roll up, you may need to configure two or more jobs to
accurately cover the original data.
This limitation has since been alleviated. As of 6.4.0, it is now considered best practice to combine all rollup configurations
into a single job.
As an example, if your index has two types of documents:
@ -157,7 +156,7 @@ and
--------------------------------------------------
// NOTCONSOLE
it may be tempting to create a single, combined rollup job which covers both of these document types, something like this:
the best practice is to combine them into a single rollup job which covers both of these document types, like this:
[source,js]
--------------------------------------------------
@ -191,222 +190,10 @@ PUT _xpack/rollup/job/combined
--------------------------------------------------
// NOTCONSOLE
You can see that it includes a `terms` grouping on both "node" and "title", fields that are mutually exclusive in the document types.
*This will not work.* Because the `composite` aggregation (and by extension, Rollup) only returns buckets when all keys are non-null,
and there are no documents that have both a "node" field and a "title" field, this rollup job will not produce any rollups.
Instead, you should configure two independent jobs (sharing the same index, or going to separate indices):
[source,js]
--------------------------------------------------
PUT _xpack/rollup/job/sensor
{
"index_pattern": "data-*",
"rollup_index": "data_rollup",
"cron": "*/30 * * * * ?",
"page_size" :1000,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h",
"delay": "7d"
},
"terms": {
"fields": ["node"]
}
},
"metrics": [
{
"field": "temperature",
"metrics": ["min", "max", "sum"]
}
]
}
--------------------------------------------------
// NOTCONSOLE
[source,js]
--------------------------------------------------
PUT _xpack/rollup/job/purchases
{
"index_pattern": "data-*",
"rollup_index": "data_rollup",
"cron": "*/30 * * * * ?",
"page_size" :1000,
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h",
"delay": "7d"
},
"terms": {
"fields": ["title"]
}
},
"metrics": [
{
"field": "price",
"metrics": ["avg"]
}
]
}
--------------------------------------------------
// NOTCONSOLE
Notice that each job now deals with a single "document type", and will not run into the limitations described above. We are working on changes
in core Elasticsearch to remove this limitation from the `composite` aggregation, and the documentation will be updated accordingly
when this particular scenario is fixed.
=== Doc counts and overlapping jobs
There is an issue with doc counts, related to the above grouping limitation. Imagine you have two Rollup jobs saving to the same index, where
one job is a "subset" of another job.
There was previously an issue with document counts on "overlapping" job configurations, driven by the same internal implementation detail.
If there were two Rollup jobs saving to the same index, where one job is a "subset" of another job, it was possible that document counts
could be incorrect for certain aggregation arrangements.
For example, you might have jobs with these two groupings:
[source,js]
--------------------------------------------------
PUT _xpack/rollup/job/sensor-all
{
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h",
"delay": "7d"
},
"terms": {
"fields": ["node"]
}
},
"metrics": [
{
"field": "price",
"metrics": ["avg"]
}
]
...
}
--------------------------------------------------
// NOTCONSOLE
and
[source,js]
--------------------------------------------------
PUT _xpack/rollup/job/sensor-building
{
"groups" : {
"date_histogram": {
"field": "timestamp",
"interval": "1h",
"delay": "7d"
},
"terms": {
"fields": ["node", "building"]
}
}
...
}
--------------------------------------------------
// NOTCONSOLE
The first job `sensor-all` contains the groupings and metrics that apply to all data in the index. The second job is rolling up a subset
of data (in different buildings) which also includes a building identifier. You did this because combining them would run into the limitation
described in the previous section.
This _mostly_ works, but can sometimes return incorrect `doc_counts` when you search. All metrics will be valid, however.
The issue arises from the composite agg limitation described before, combined with search-time optimization. Imagine you try to run the
following aggregation:
[source,js]
--------------------------------------------------
"aggs" : {
"nodes": {
"terms": {
"field": "node"
}
}
}
--------------------------------------------------
// NOTCONSOLE
This aggregation could be serviced by either the `sensor-all` or the `sensor-building` job, since they both group on the node field. So the RollupSearch
API will search both of them and merge results. This will result in *correct* doc_counts and *correct* metrics. No problem here.
The issue arises from an aggregation that can _only_ be serviced by `sensor-building`, like this one:
[source,js]
--------------------------------------------------
"aggs" : {
"nodes": {
"terms": {
"field": "node"
},
"aggs": {
"building": {
"terms": {
"field": "building"
}
}
}
}
}
--------------------------------------------------
// NOTCONSOLE
Now we run into a problem. The RollupSearch API will correctly identify that only the `sensor-building` job has all the required components
to answer the aggregation, and will search it exclusively. Unfortunately, due to the composite aggregation limitation, that job only
rolled up documents that have both a "node" and a "building" field, meaning that the doc_counts for the `"nodes"` aggregation will not
include counts for any document that doesn't have `[node, building]` fields.
- The `doc_count` for `"nodes"` aggregation will be incorrect because it only contains counts for `nodes` that also have buildings
- The `doc_count` for `"buildings"` aggregation will be correct
- Any metrics, on any level, will be correct
==== Workarounds
There are two main workarounds if you find yourself with a schema like the above.
The easiest and most robust method is to use separate indices to store your rollups. The limitations arise because you have several document
schemas cohabiting in a single index, which makes it difficult for rollups to correctly summarize. If you make several rollup
jobs and store them in separate indices, these sorts of difficulties do not arise. It does, however, keep you from searching across several
different rollup indices at the same time.
The other workaround is to include an "off-target" aggregation in the query, which pulls in the "superset" job and corrects the doc counts.
The RollupSearch API determines the best job to search for each "leaf node" in the aggregation tree. So if we include a metric agg on `price`,
which was only defined in the `sensor-all` job, that will "pull in" the other job:
[source,js]
--------------------------------------------------
"aggs" : {
"nodes": {
"terms": {
"field": "node"
},
"aggs": {
"building": {
"terms": {
"field": "building"
}
},
"avg_price": {
"avg": { "field": "price" } <1>
}
}
}
}
--------------------------------------------------
// NOTCONSOLE
<1> Adding an avg aggregation here will fix the doc counts
Because only the `sensor-all` job had an `avg` on the price field, the RollupSearch API is forced to pull in that additional job for searching,
and will merge/correct the doc_counts as appropriate. This sort of workaround applies to any additional aggregation -- metric or bucketing --
although it can be tedious to look through the jobs and determine the right one to add.
==== Status
We realize this is an onerous limitation, and somewhat breaks the rollup contract of "pick the fields to rollup, we do the rest". We are
actively working to get the limitation in the `composite` agg fixed, along with the related issues in Rollup. The documentation will be updated when
the fix is implemented.
This issue has also since been eliminated in 6.4.0.

View File

@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.XPackFeatureSet;

View File

@ -1,18 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.action;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeRequest;
public class XPackUsageRequest extends MasterNodeRequest<XPackUsageRequest> {
@Override
public ActionRequestValidationException validate() {
return null;
}
}

View File

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.action;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
public class XPackUsageRequestBuilder
extends MasterNodeOperationRequestBuilder<XPackUsageRequest, XPackUsageResponse, XPackUsageRequestBuilder> {

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.core.rollup;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
@ -15,6 +16,8 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class RollupField {
// Fields that are used both in core Rollup actions and Rollup plugin
@ -34,6 +37,16 @@ public class RollupField {
public static final List<String> SUPPORTED_METRICS = Arrays.asList(MaxAggregationBuilder.NAME, MinAggregationBuilder.NAME,
SumAggregationBuilder.NAME, AvgAggregationBuilder.NAME, ValueCountAggregationBuilder.NAME);
// these mapper types are used by the configs (metric, histo, etc) to validate field mappings
public static final List<String> NUMERIC_FIELD_MAPPER_TYPES;
static {
List<String> types = Stream.of(NumberFieldMapper.NumberType.values())
.map(NumberFieldMapper.NumberType::typeName)
.collect(Collectors.toList());
types.add("scaled_float"); // have to add manually since scaled_float is in a module
NUMERIC_FIELD_MAPPER_TYPES = types;
}
/**
* Format to the appropriate Rollup field name convention
*

View File

@ -159,7 +159,6 @@ public class DateHistoGroupConfig implements Writeable, ToXContentFragment {
vsBuilder.dateHistogramInterval(interval);
vsBuilder.field(field);
vsBuilder.timeZone(timeZone);
return Collections.singletonList(vsBuilder);
}

View File

@ -15,7 +15,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.HistogramValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
@ -30,7 +29,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* The configuration object for the histograms in the rollup config
@ -51,10 +49,6 @@ public class HistoGroupConfig implements Writeable, ToXContentFragment {
private static final ParseField INTERVAL = new ParseField("interval");
private static final ParseField FIELDS = new ParseField("fields");
private static final List<String> MAPPER_TYPES = Stream.of(NumberFieldMapper.NumberType.values())
.map(NumberFieldMapper.NumberType::typeName)
.collect(Collectors.toList());
private final long interval;
private final String[] fields;
@ -96,6 +90,7 @@ public class HistoGroupConfig implements Writeable, ToXContentFragment {
= new HistogramValuesSourceBuilder(RollupField.formatIndexerAggName(f, HistogramAggregationBuilder.NAME));
vsBuilder.interval(interval);
vsBuilder.field(f);
vsBuilder.missingBucket(true);
return vsBuilder;
}).collect(Collectors.toList());
}
@ -125,7 +120,7 @@ public class HistoGroupConfig implements Writeable, ToXContentFragment {
Map<String, FieldCapabilities> fieldCaps = fieldCapsResponse.get(field);
if (fieldCaps != null && fieldCaps.isEmpty() == false) {
fieldCaps.forEach((key, value) -> {
if (MAPPER_TYPES.contains(key)) {
if (RollupField.NUMERIC_FIELD_MAPPER_TYPES.contains(key)) {
if (value.isAggregatable() == false) {
validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " +
"but is not.");

View File

@ -15,7 +15,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
@ -32,7 +31,6 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* The configuration object for the metrics portion of a rollup job config
@ -66,15 +64,6 @@ public class MetricConfig implements Writeable, ToXContentFragment {
private static final ParseField AVG = new ParseField("avg");
private static final ParseField VALUE_COUNT = new ParseField("value_count");
private static final List<String> MAPPER_TYPES;
static {
List<String> types = Stream.of(NumberFieldMapper.NumberType.values())
.map(NumberFieldMapper.NumberType::typeName)
.collect(Collectors.toList());
types.add("scaled_float"); // have to add manually since scaled_float is in a module
MAPPER_TYPES = types;
}
public static final ObjectParser<MetricConfig.Builder, Void> PARSER = new ObjectParser<>(NAME, MetricConfig.Builder::new);
static {
@ -153,7 +142,7 @@ public class MetricConfig implements Writeable, ToXContentFragment {
Map<String, FieldCapabilities> fieldCaps = fieldCapsResponse.get(field);
if (fieldCaps != null && fieldCaps.isEmpty() == false) {
fieldCaps.forEach((key, value) -> {
if (MAPPER_TYPES.contains(key)) {
if (RollupField.NUMERIC_FIELD_MAPPER_TYPES.contains(key)) {
if (value.isAggregatable() == false) {
validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " +
"but is not.");

View File

@ -80,6 +80,7 @@ public class TermsGroupConfig implements Writeable, ToXContentFragment {
TermsValuesSourceBuilder vsBuilder
= new TermsValuesSourceBuilder(RollupField.formatIndexerAggName(f, TermsAggregationBuilder.NAME));
vsBuilder.field(f);
vsBuilder.missingBucket(true);
return vsBuilder;
}).collect(Collectors.toList());
}
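The one-line `missingBucket(true)` additions in this file and in `HistoGroupConfig` appear to be what lift the all-keys-non-null restriction discussed in the rollup documentation changes earlier in this diff: with `missing_bucket` enabled, documents without a value for a source still produce a bucket (with a null key) instead of being dropped. A hedged standalone sketch of the option on a composite terms source (names are illustrative):
[source,java]
--------------------------------------------------
// Illustrative only: a composite "terms" value source with missing_bucket enabled,
// so documents missing the "building" field are bucketed under a null key rather
// than excluded from the rollup.
TermsValuesSourceBuilder building = new TermsValuesSourceBuilder("building_terms")
    .field("building")
    .missingBucket(true);
--------------------------------------------------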

View File

@ -28,12 +28,12 @@ import org.elasticsearch.xpack.core.ssl.SSLConfiguration;
import org.elasticsearch.xpack.core.ssl.SSLService;
import javax.net.ssl.SSLEngine;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.xpack.core.security.SecurityField.setting;
@ -58,22 +58,9 @@ public class SecurityNetty4Transport extends Netty4Transport {
super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService);
this.sslService = sslService;
this.sslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings);
final Settings transportSSLSettings = settings.getByPrefix(setting("transport.ssl."));
if (sslEnabled) {
this.sslConfiguration = sslService.sslConfiguration(transportSSLSettings, Settings.EMPTY);
Map<String, Settings> profileSettingsMap = settings.getGroups("transport.profiles.", true);
Map<String, SSLConfiguration> profileConfiguration = new HashMap<>(profileSettingsMap.size() + 1);
for (Map.Entry<String, Settings> entry : profileSettingsMap.entrySet()) {
Settings profileSettings = entry.getValue();
final Settings profileSslSettings = profileSslSettings(profileSettings);
SSLConfiguration configuration = sslService.sslConfiguration(profileSslSettings, transportSSLSettings);
profileConfiguration.put(entry.getKey(), configuration);
}
if (profileConfiguration.containsKey(TcpTransport.DEFAULT_PROFILE) == false) {
profileConfiguration.put(TcpTransport.DEFAULT_PROFILE, sslConfiguration);
}
this.sslConfiguration = sslService.getSSLConfiguration(setting("transport.ssl."));
Map<String, SSLConfiguration> profileConfiguration = getTransportProfileConfigurations(settings, sslService, sslConfiguration);
this.profileConfiguration = Collections.unmodifiableMap(profileConfiguration);
} else {
this.profileConfiguration = Collections.emptyMap();
@ -81,6 +68,21 @@ public class SecurityNetty4Transport extends Netty4Transport {
}
}
public static Map<String, SSLConfiguration> getTransportProfileConfigurations(Settings settings, SSLService sslService,
SSLConfiguration defaultConfiguration) {
Set<String> profileNames = settings.getGroups("transport.profiles.", true).keySet();
Map<String, SSLConfiguration> profileConfiguration = new HashMap<>(profileNames.size() + 1);
for (String profileName : profileNames) {
SSLConfiguration configuration = sslService.getSSLConfiguration("transport.profiles." + profileName + "." + setting("ssl"));
profileConfiguration.put(profileName, configuration);
}
if (profileConfiguration.containsKey(TcpTransport.DEFAULT_PROFILE) == false) {
profileConfiguration.put(TcpTransport.DEFAULT_PROFILE, defaultConfiguration);
}
return profileConfiguration;
}
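A hedged usage sketch of the extracted helper (the profile name and variable wiring are assumptions, not from this commit), resolving the per-profile SSL configurations for a node that defines one extra transport profile:
[source,java]
--------------------------------------------------
// Illustrative only: assumes `settings` defines transport.profiles.client.* and that
// `sslService` already loaded its SSL contexts at node construction time.
SSLConfiguration transportConfig = sslService.getSSLConfiguration("xpack.security.transport.ssl.");
Map<String, SSLConfiguration> profiles =
    SecurityNetty4Transport.getTransportProfileConfigurations(settings, sslService, transportConfig);
// "client" resolves the context name "transport.profiles.client.xpack.security.ssl";
// the "default" profile is always present, falling back to transportConfig when not configured.
SSLConfiguration clientProfile = profiles.get("client");
SSLConfiguration defaultProfile = profiles.get(TcpTransport.DEFAULT_PROFILE);
--------------------------------------------------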
@Override
protected void doStart() {
super.doStart();
@ -209,8 +211,4 @@ public class SecurityNetty4Transport extends Netty4Transport {
super.connect(ctx, remoteAddress, localAddress, promise);
}
}
public static Settings profileSslSettings(Settings profileSettings) {
return profileSettings.getByPrefix(setting("ssl."));
}
}

View File

@ -22,6 +22,8 @@ import java.util.List;
import java.util.Locale;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Bridges SSLConfiguration into the {@link Settings} framework, using {@link Setting} objects.
@ -221,4 +223,10 @@ public class SSLConfigurationSettings {
CLIENT_AUTH_SETTING_PROFILES, VERIFICATION_MODE_SETTING_PROFILES);
}
public List<Setting<SecureString>> getSecureSettingsInUse(Settings settings) {
return Stream.of(this.truststorePassword, this.x509KeyPair.keystorePassword,
this.x509KeyPair.keystoreKeyPassword, this.x509KeyPair.keyPassword)
.filter(s -> s.exists(settings))
.collect(Collectors.toList());
}
}

View File

@ -30,7 +30,6 @@ import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509ExtendedKeyManager;
import javax.net.ssl.X509ExtendedTrustManager;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
@ -49,8 +48,10 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Provides access to {@link SSLEngine} and {@link SSLSocketFactory} objects based on a provided configuration. All
@ -58,7 +59,23 @@ import java.util.Set;
*/
public class SSLService extends AbstractComponent {
/**
* This is a mapping from "context name" (in general use, the name of a setting key)
* to a configuration.
* This allows us to easily answer the question "What is the configuration for ssl in realm XYZ?"
* Multiple "context names" may map to the same configuration (either by object-identity or by object-equality).
* For example "xpack.http.ssl" may exist as a name in this map and have the global ssl configuration as a value
*/
private final Map<String, SSLConfiguration> sslConfigurations;
/**
* A mapping from a SSLConfiguration to a pre-built context.
* <p>
* This is managed separately to the {@link #sslConfigurations} map, so that a single configuration (by object equality)
* always maps to the same {@link SSLContextHolder}, even if it is being used within a different context-name.
*/
private final Map<SSLConfiguration, SSLContextHolder> sslContexts;
private final SSLConfiguration globalSSLConfiguration;
private final SetOnce<SSLConfiguration> transportSSLConfiguration = new SetOnce<>();
private final Environment env;
@ -71,14 +88,16 @@ public class SSLService extends AbstractComponent {
super(settings);
this.env = environment;
this.globalSSLConfiguration = new SSLConfiguration(settings.getByPrefix(XPackSettings.GLOBAL_SSL_PREFIX));
this.sslConfigurations = new HashMap<>();
this.sslContexts = loadSSLConfigurations();
}
private SSLService(Settings settings, Environment environment, SSLConfiguration globalSSLConfiguration,
Map<SSLConfiguration, SSLContextHolder> sslContexts) {
Map<String, SSLConfiguration> sslConfigurations, Map<SSLConfiguration, SSLContextHolder> sslContexts) {
super(settings);
this.env = environment;
this.globalSSLConfiguration = globalSSLConfiguration;
this.sslConfigurations = sslConfigurations;
this.sslContexts = sslContexts;
}
@ -88,7 +107,7 @@ public class SSLService extends AbstractComponent {
* have been created during initialization
*/
public SSLService createDynamicSSLService() {
return new SSLService(settings, env, globalSSLConfiguration, sslContexts) {
return new SSLService(settings, env, globalSSLConfiguration, sslConfigurations, sslContexts) {
@Override
Map<SSLConfiguration, SSLContextHolder> loadSSLConfigurations() {
@ -119,9 +138,17 @@ public class SSLService extends AbstractComponent {
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return
* a context created from the default configuration
* @return Never {@code null}.
* @deprecated This method will fail if the SSL configuration uses a {@link org.elasticsearch.common.settings.SecureSetting} but the
* {@link org.elasticsearch.common.settings.SecureSettings} have been closed. Use {@link #getSSLConfiguration(String)}
* and {@link #sslIOSessionStrategy(SSLConfiguration)} (Deprecated, but not removed because monitoring uses dynamic SSL settings)
*/
@Deprecated
public SSLIOSessionStrategy sslIOSessionStrategy(Settings settings) {
SSLConfiguration config = sslConfiguration(settings);
return sslIOSessionStrategy(config);
}
public SSLIOSessionStrategy sslIOSessionStrategy(SSLConfiguration config) {
SSLContext sslContext = sslContext(config);
String[] ciphers = supportedCiphers(sslParameters(sslContext).getCipherSuites(), config.cipherSuites(), false);
String[] supportedProtocols = config.supportedProtocols().toArray(Strings.EMPTY_ARRAY);
@ -163,51 +190,15 @@ public class SSLService extends AbstractComponent {
}
/**
* Create a new {@link SSLSocketFactory} based on the provided settings. The settings are used to identify the ssl configuration that
* should be used to create the socket factory. The socket factory will also properly configure the ciphers and protocols on each
* socket that is created
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return
* a factory created from the default configuration
* Create a new {@link SSLSocketFactory} based on the provided configuration.
* The socket factory will also properly configure the ciphers and protocols on each socket that is created
* @param configuration The SSL configuration to use. Typically obtained from {@link #getSSLConfiguration(String)}
* @return Never {@code null}.
*/
public SSLSocketFactory sslSocketFactory(Settings settings) {
SSLConfiguration sslConfiguration = sslConfiguration(settings);
SSLSocketFactory socketFactory = sslContext(sslConfiguration).getSocketFactory();
return new SecuritySSLSocketFactory(socketFactory, sslConfiguration.supportedProtocols().toArray(Strings.EMPTY_ARRAY),
supportedCiphers(socketFactory.getSupportedCipherSuites(), sslConfiguration.cipherSuites(), false));
}
/**
* Creates an {@link SSLEngine} based on the provided settings. The settings are used to identify the ssl configuration that should be
* used to create the engine. This SSLEngine cannot be used for hostname verification since the engine will not be created with the
* host and port. This method is useful to obtain an SSLEngine that will be used for server connections or client connections that
* will not use hostname verification.
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return
* a SSLEngine created from the default configuration
* @param fallbackSettings the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY}
* results in a fallback to the global configuration
* @return {@link SSLEngine}
*/
public SSLEngine createSSLEngine(Settings settings, Settings fallbackSettings) {
return createSSLEngine(settings, fallbackSettings, null, -1);
}
/**
* Creates an {@link SSLEngine} based on the provided settings. The settings are used to identify the ssl configuration that should be
* used to create the engine. This SSLEngine can be used for a connection that requires hostname verification assuming the provided
* host and port are correct. The SSLEngine created by this method is most useful for clients with hostname verification enabled
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return
* a SSLEngine created from the default configuration
* @param fallbackSettings the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY}
* results in a fallback to the global configuration
* @param host the host of the remote endpoint. If using hostname verification, this should match what is in the remote endpoint's
* certificate
* @param port the port of the remote endpoint
* @return {@link SSLEngine}
*/
public SSLEngine createSSLEngine(Settings settings, Settings fallbackSettings, String host, int port) {
SSLConfiguration configuration = sslConfiguration(settings, fallbackSettings);
return createSSLEngine(configuration, host, port);
public SSLSocketFactory sslSocketFactory(SSLConfiguration configuration) {
SSLSocketFactory socketFactory = sslContext(configuration).getSocketFactory();
return new SecuritySSLSocketFactory(socketFactory, configuration.supportedProtocols().toArray(Strings.EMPTY_ARRAY),
supportedCiphers(socketFactory.getSupportedCipherSuites(), configuration.cipherSuites(), false));
}
/**
@ -219,7 +210,7 @@ public class SSLService extends AbstractComponent {
* certificate
* @param port the port of the remote endpoint
* @return {@link SSLEngine}
* @see #sslConfiguration(Settings, Settings)
* @see #getSSLConfiguration(String)
*/
public SSLEngine createSSLEngine(SSLConfiguration configuration, String host, int port) {
SSLContext sslContext = sslContext(configuration);
@ -249,47 +240,18 @@ public class SSLService extends AbstractComponent {
* @param sslConfiguration the configuration to check
*/
public boolean isConfigurationValidForServerUsage(SSLConfiguration sslConfiguration) {
Objects.requireNonNull(sslConfiguration, "SSLConfiguration cannot be null");
return sslConfiguration.keyConfig() != KeyConfig.NONE;
}
/**
* Indicates whether client authentication is enabled for a particular configuration
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. The global configuration
* will be used for fallback
*/
public boolean isSSLClientAuthEnabled(Settings settings) {
return isSSLClientAuthEnabled(settings, Settings.EMPTY);
}
/**
* Indicates whether client authentication is enabled for a particular configuration
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix
* @param fallback the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY}
* results in a fallback to the global configuration
*/
public boolean isSSLClientAuthEnabled(Settings settings, Settings fallback) {
SSLConfiguration sslConfiguration = sslConfiguration(settings, fallback);
return isSSLClientAuthEnabled(sslConfiguration);
}
/**
* Indicates whether client authentication is enabled for a particular configuration
*/
public boolean isSSLClientAuthEnabled(SSLConfiguration sslConfiguration) {
Objects.requireNonNull(sslConfiguration, "SSLConfiguration cannot be null");
return sslConfiguration.sslClientAuth().enabled();
}
/**
* Returns the {@link VerificationMode} that is specified in the settings (or the default)
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix
* @param fallback the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY}
* results in a fallback to the global configuration
*/
public VerificationMode getVerificationMode(Settings settings, Settings fallback) {
SSLConfiguration sslConfiguration = sslConfiguration(settings, fallback);
return sslConfiguration.verificationMode();
}
/**
* Returns the {@link SSLContext} for the global configuration. Mainly used for testing
*/
@ -309,6 +271,7 @@ public class SSLService extends AbstractComponent {
* @throws IllegalArgumentException if not found
*/
SSLContextHolder sslContextHolder(SSLConfiguration sslConfiguration) {
Objects.requireNonNull(sslConfiguration, "SSL Configuration cannot be null");
SSLContextHolder holder = sslContexts.get(sslConfiguration);
if (holder == null) {
throw new IllegalArgumentException("did not find a SSLContext for [" + sslConfiguration.toString() + "]");
@ -328,20 +291,11 @@ public class SSLService extends AbstractComponent {
return new SSLConfiguration(settings, globalSSLConfiguration);
}
/**
* Returns the existing {@link SSLConfiguration} for the given settings and applies the provided fallback settings instead of the global
* configuration
* @param settings the settings for the ssl configuration
* @param fallbackSettings the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY}
* results in a fallback to the global configuration
* @return the ssl configuration for the provided settings. If the settings are empty, the global configuration is returned
*/
public SSLConfiguration sslConfiguration(Settings settings, Settings fallbackSettings) {
if (settings.isEmpty() && fallbackSettings.isEmpty()) {
return globalSSLConfiguration;
}
SSLConfiguration fallback = sslConfiguration(fallbackSettings);
return new SSLConfiguration(settings, fallback);
public Set<String> getTransportProfileContextNames() {
return Collections.unmodifiableSet(this.sslConfigurations
.keySet().stream()
.filter(k -> k.startsWith("transport.profiles."))
.collect(Collectors.toSet()));
}
/**
@ -430,27 +384,46 @@ public class SSLService extends AbstractComponent {
* Parses the settings to load all SSLConfiguration objects that will be used.
*/
Map<SSLConfiguration, SSLContextHolder> loadSSLConfigurations() {
Map<SSLConfiguration, SSLContextHolder> sslConfigurations = new HashMap<>();
sslConfigurations.put(globalSSLConfiguration, createSslContext(globalSSLConfiguration));
Map<SSLConfiguration, SSLContextHolder> sslContextHolders = new HashMap<>();
sslContextHolders.put(globalSSLConfiguration, createSslContext(globalSSLConfiguration));
this.sslConfigurations.put("xpack.ssl", globalSSLConfiguration);
Map<String, Settings> sslSettingsMap = new HashMap<>();
sslSettingsMap.put(XPackSettings.HTTP_SSL_PREFIX, getHttpTransportSSLSettings(settings));
sslSettingsMap.put("xpack.http.ssl", settings.getByPrefix("xpack.http.ssl."));
sslSettingsMap.putAll(getRealmsSSLSettings(settings));
sslSettingsMap.putAll(getMonitoringExporterSettings(settings));
sslSettingsMap.forEach((key, sslSettings) -> {
if (sslSettings.isEmpty()) {
storeSslConfiguration(key, globalSSLConfiguration);
} else {
final SSLConfiguration configuration = new SSLConfiguration(sslSettings, globalSSLConfiguration);
storeSslConfiguration(key, configuration);
sslContextHolders.computeIfAbsent(configuration, this::createSslContext);
}
});
final Settings transportSSLSettings = settings.getByPrefix(XPackSettings.TRANSPORT_SSL_PREFIX);
List<Settings> sslSettingsList = new ArrayList<>();
sslSettingsList.add(getHttpTransportSSLSettings(settings));
sslSettingsList.add(settings.getByPrefix("xpack.http.ssl."));
sslSettingsList.addAll(getRealmsSSLSettings(settings));
sslSettingsList.addAll(getMonitoringExporterSettings(settings));
sslSettingsList.forEach((sslSettings) ->
sslConfigurations.computeIfAbsent(new SSLConfiguration(sslSettings, globalSSLConfiguration), this::createSslContext));
// transport is special because we want to use an auto-generated key when there isn't one
final SSLConfiguration transportSSLConfiguration = new SSLConfiguration(transportSSLSettings, globalSSLConfiguration);
this.transportSSLConfiguration.set(transportSSLConfiguration);
List<Settings> profileSettings = getTransportProfileSSLSettings(settings);
sslConfigurations.computeIfAbsent(transportSSLConfiguration, this::createSslContext);
profileSettings.forEach((profileSetting) ->
sslConfigurations.computeIfAbsent(new SSLConfiguration(profileSetting, transportSSLConfiguration), this::createSslContext));
return Collections.unmodifiableMap(sslConfigurations);
storeSslConfiguration(XPackSettings.TRANSPORT_SSL_PREFIX, transportSSLConfiguration);
Map<String, Settings> profileSettings = getTransportProfileSSLSettings(settings);
sslContextHolders.computeIfAbsent(transportSSLConfiguration, this::createSslContext);
profileSettings.forEach((key, profileSetting) -> {
final SSLConfiguration configuration = new SSLConfiguration(profileSetting, transportSSLConfiguration);
storeSslConfiguration(key, configuration);
sslContextHolders.computeIfAbsent(configuration, this::createSslContext);
});
return Collections.unmodifiableMap(sslContextHolders);
}
private void storeSslConfiguration(String key, SSLConfiguration configuration) {
if (key.endsWith(".")) {
key = key.substring(0, key.length() - 1);
}
sslConfigurations.put(key, configuration);
}
@ -619,31 +592,32 @@ public class SSLService extends AbstractComponent {
}
}
private static List<Settings> getRealmsSSLSettings(Settings settings) {
List<Settings> sslSettings = new ArrayList<>();
Settings realmsSettings = settings.getByPrefix(SecurityField.setting("authc.realms."));
/**
* @return A map of Settings prefix to Settings object
*/
private static Map<String, Settings> getRealmsSSLSettings(Settings settings) {
Map<String, Settings> sslSettings = new HashMap<>();
final String prefix = SecurityField.setting("authc.realms.");
Settings realmsSettings = settings.getByPrefix(prefix);
for (String name : realmsSettings.names()) {
Settings realmSSLSettings = realmsSettings.getAsSettings(name).getByPrefix("ssl.");
if (realmSSLSettings.isEmpty() == false) {
sslSettings.add(realmSSLSettings);
}
// Put this even if empty, so that the name will be mapped to the global SSL configuration
sslSettings.put(prefix + name + ".ssl", realmSSLSettings);
}
return sslSettings;
}
private static List<Settings> getTransportProfileSSLSettings(Settings settings) {
List<Settings> sslSettings = new ArrayList<>();
private static Map<String, Settings> getTransportProfileSSLSettings(Settings settings) {
Map<String, Settings> sslSettings = new HashMap<>();
Map<String, Settings> profiles = settings.getGroups("transport.profiles.", true);
for (Entry<String, Settings> entry : profiles.entrySet()) {
Settings profileSettings = entry.getValue().getByPrefix("xpack.security.ssl.");
if (profileSettings.isEmpty() == false) {
sslSettings.add(profileSettings);
}
sslSettings.put("transport.profiles." + entry.getKey() + ".xpack.security.ssl", profileSettings);
}
return sslSettings;
}
public static Settings getHttpTransportSSLSettings(Settings settings) {
private Settings getHttpTransportSSLSettings(Settings settings) {
Settings httpSSLSettings = settings.getByPrefix(XPackSettings.HTTP_SSL_PREFIX);
if (httpSSLSettings.isEmpty()) {
return httpSSLSettings;
@ -656,18 +630,33 @@ public class SSLService extends AbstractComponent {
return builder.build();
}
private static List<Settings> getMonitoringExporterSettings(Settings settings) {
List<Settings> sslSettings = new ArrayList<>();
public SSLConfiguration getHttpTransportSSLConfiguration() {
return getSSLConfiguration(XPackSettings.HTTP_SSL_PREFIX);
}
private static Map<String, Settings> getMonitoringExporterSettings(Settings settings) {
Map<String, Settings> sslSettings = new HashMap<>();
Map<String, Settings> exportersSettings = settings.getGroups("xpack.monitoring.exporters.");
for (Entry<String, Settings> entry : exportersSettings.entrySet()) {
Settings exporterSSLSettings = entry.getValue().getByPrefix("ssl.");
if (exporterSSLSettings.isEmpty() == false) {
sslSettings.add(exporterSSLSettings);
}
// Put this even if empty, so that the name will be mapped to the global SSL configuration
sslSettings.put("xpack.monitoring.exporters." + entry.getKey() + ".ssl", exporterSSLSettings);
}
return sslSettings;
}
public SSLConfiguration getSSLConfiguration(String contextName) {
if (contextName.endsWith(".")) {
contextName = contextName.substring(0, contextName.length() - 1);
}
final SSLConfiguration configuration = sslConfigurations.get(contextName);
if (configuration == null) {
logger.warn("Cannot find SSL configuration for context {}. Known contexts are: {}", contextName,
Strings.collectionToCommaDelimitedString(sslConfigurations.keySet()));
}
return configuration;
}
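To make the context-name lookup concrete, a hedged example (the realm name and LDAP endpoint are hypothetical, not from this commit) of how a caller would now obtain a realm's SSL settings and build an engine from them:
[source,java]
--------------------------------------------------
// Illustrative only: "ldap1" is a hypothetical realm name.
SSLConfiguration ldapSsl = sslService.getSSLConfiguration("xpack.security.authc.realms.ldap1.ssl");
// Realms with no ssl.* settings of their own map to the global xpack.ssl configuration.
SSLEngine engine = sslService.createSSLEngine(ldapSsl, "ldap.example.com", 636);
--------------------------------------------------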
/**
* Maps the supported protocols to an appropriate ssl context algorithm. We make an attempt to use the "best" algorithm when
* possible. The names in this method are taken from the

Some files were not shown because too many files have changed in this diff.