Merge branch 'master' of github.com:elastic/elasticsearch into feature/rank-eval

commit dbef96c69e
@@ -257,8 +257,10 @@ thirdPartyAudit.excludes = [
     'org.noggit.JSONParser',
 ]
 
-// dependency license are currently checked in distribution
-dependencyLicenses.enabled = false
+dependencyLicenses {
+    mapping from: /lucene-.*/, to: 'lucene'
+    mapping from: /jackson-.*/, to: 'jackson'
+}
 
 if (isEclipse == false || project.path == ":core-tests") {
     task integTest(type: RandomizedTestingTask,
@@ -39,7 +39,7 @@ import org.elasticsearch.transport.TransportService;
 
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedHashSet;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -80,25 +80,26 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
         try {
             final String repository = request.repository();
             List<SnapshotInfo> snapshotInfoBuilder = new ArrayList<>();
-            if (isAllSnapshots(request.snapshots())) {
-                snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository));
-                snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository,
-                                                                      snapshotsService.snapshotIds(repository),
-                                                                      request.ignoreUnavailable()));
-            } else if (isCurrentSnapshots(request.snapshots())) {
-                snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository));
-            } else {
-                final Map<String, SnapshotId> allSnapshotIds = new HashMap<>();
-                for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) {
-                    SnapshotId snapshotId = snapshotInfo.snapshotId();
-                    allSnapshotIds.put(snapshotId.getName(), snapshotId);
-                }
+            final Map<String, SnapshotId> allSnapshotIds = new HashMap<>();
+            final List<SnapshotId> currentSnapshotIds = new ArrayList<>();
+            for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) {
+                SnapshotId snapshotId = snapshotInfo.snapshotId();
+                allSnapshotIds.put(snapshotId.getName(), snapshotId);
+                currentSnapshotIds.add(snapshotId);
+            }
+            if (isCurrentSnapshotsOnly(request.snapshots()) == false) {
                 for (SnapshotId snapshotId : snapshotsService.snapshotIds(repository)) {
                     allSnapshotIds.put(snapshotId.getName(), snapshotId);
                 }
-                final Set<SnapshotId> toResolve = new LinkedHashSet<>(); // maintain order
+            }
+            final Set<SnapshotId> toResolve = new HashSet<>();
+            if (isAllSnapshots(request.snapshots())) {
+                toResolve.addAll(allSnapshotIds.values());
+            } else {
                 for (String snapshotOrPattern : request.snapshots()) {
-                    if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) {
+                    if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) {
+                        toResolve.addAll(currentSnapshotIds);
+                    } else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) {
                         if (allSnapshotIds.containsKey(snapshotOrPattern)) {
                             toResolve.add(allSnapshotIds.get(snapshotOrPattern));
                         } else if (request.ignoreUnavailable() == false) {
@@ -113,12 +114,12 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
                     }
                 }
 
-                if (toResolve.isEmpty() && request.ignoreUnavailable() == false) {
+                if (toResolve.isEmpty() && request.ignoreUnavailable() == false && isCurrentSnapshotsOnly(request.snapshots()) == false) {
                     throw new SnapshotMissingException(repository, request.snapshots()[0]);
                 }
 
-                snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable()));
             }
 
+            snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable()));
             listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder));
         } catch (Exception e) {
             listener.onFailure(e);
@@ -129,7 +130,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
         return (snapshots.length == 0) || (snapshots.length == 1 && GetSnapshotsRequest.ALL_SNAPSHOTS.equalsIgnoreCase(snapshots[0]));
     }
 
-    private boolean isCurrentSnapshots(String[] snapshots) {
+    private boolean isCurrentSnapshotsOnly(String[] snapshots) {
         return (snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0]));
    }
 }
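Note: the merged version above first indexes every known snapshot by name, then resolves the request in one pass: _all takes everything, _current takes only in-flight snapshots, and other names are matched exactly. A rough standalone sketch of that resolution step (plain Java, simplified hypothetical types, not part of this commit; wildcard and ignore_unavailable handling elided):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    final class SnapshotResolutionSketch {

        // Simplified stand-ins for GetSnapshotsRequest.ALL_SNAPSHOTS / CURRENT_SNAPSHOT.
        static final String ALL = "_all";
        static final String CURRENT = "_current";

        /** Resolves requested names against all known and currently running snapshot names. */
        static Set<String> resolve(String[] requested, Map<String, String> allIds, List<String> currentIds) {
            final Set<String> toResolve = new HashSet<>();
            if (requested.length == 0 || (requested.length == 1 && ALL.equalsIgnoreCase(requested[0]))) {
                toResolve.addAll(allIds.values());
            } else {
                for (String name : requested) {
                    if (CURRENT.equalsIgnoreCase(name)) {
                        toResolve.addAll(currentIds);    // in-flight snapshots only
                    } else if (allIds.containsKey(name)) {
                        toResolve.add(allIds.get(name)); // exact-name match
                    }
                }
            }
            return toResolve;
        }
    }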
@@ -271,9 +271,6 @@ final class Bootstrap {
             closeSystOut();
         }
 
-        // fail if using broken version
-        JVMCheck.check();
-
         // fail if somebody replaced the lucene jars
         checkLucene();
 
@@ -44,6 +44,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.function.Predicate;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 /**
  * We enforce limits once any network host is configured. In this case we assume the node is running in production
@@ -172,6 +174,7 @@ final class BootstrapCheck {
         checks.add(new UseSerialGCCheck());
         checks.add(new OnErrorCheck());
         checks.add(new OnOutOfMemoryErrorCheck());
+        checks.add(new G1GCCheck());
         return Collections.unmodifiableList(checks);
     }
 
@@ -549,4 +552,56 @@ final class BootstrapCheck {
 
     }
 
+    /**
+     * Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled.
+     */
+    static class G1GCCheck implements BootstrapCheck.Check {
+
+        @Override
+        public boolean check() {
+            if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) {
+                final String jvmVersion = jvmVersion();
+                final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");
+                final Matcher matcher = pattern.matcher(jvmVersion);
+                final boolean matches = matcher.matches();
+                assert matches : jvmVersion;
+                final int major = Integer.parseInt(matcher.group(1));
+                final int update = Integer.parseInt(matcher.group(2));
+                return major == 25 && update < 40;
+            } else {
+                return false;
+            }
+        }
+
+        // visible for testing
+        String jvmVendor() {
+            return Constants.JVM_VENDOR;
+        }
+
+        // visible for testing
+        boolean isG1GCEnabled() {
+            assert "Oracle Corporation".equals(jvmVendor());
+            return JvmInfo.jvmInfo().useG1GC().equals("true");
+        }
+
+        // visible for testing
+        String jvmVersion() {
+            assert "Oracle Corporation".equals(jvmVendor());
+            return Constants.JVM_VERSION;
+        }
+
+        // visible for tests
+        boolean isJava8() {
+            return Constants.JVM_SPEC_VERSION.equals("1.8");
+        }
+
+        @Override
+        public String errorMessage() {
+            return String.format(
+                Locale.ROOT,
+                "JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion());
+        }
+
+    }
+
 }
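Note: the check above only fires for HotSpot builds of the form major.update-bNN with major 25 (Java 8) and update below 40. A small self-contained sketch of the same parsing (not part of this commit):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    final class G1GCVersionDemo {
        public static void main(String[] args) {
            // Same pattern as the bootstrap check: e.g. "25.25-b02" -> major 25, update 25.
            final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");
            for (String jvmVersion : new String[] {"25.25-b02", "25.40-b25", "24.45-b08"}) {
                final Matcher matcher = pattern.matcher(jvmVersion);
                if (matcher.matches()) {
                    final int major = Integer.parseInt(matcher.group(1));
                    final int update = Integer.parseInt(matcher.group(2));
                    final boolean broken = major == 25 && update < 40;
                    System.out.println(jvmVersion + " -> broken with G1GC: " + broken);
                }
            }
            // prints: 25.25-b02 -> true, 25.40-b25 -> false, 24.45-b08 -> false
        }
    }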
@@ -1,246 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.bootstrap;
-
-import org.apache.lucene.util.Constants;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.monitor.jvm.JvmInfo;
-
-import java.lang.management.ManagementFactory;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-
-/** Checks that the JVM is ok and won't cause index corruption */
-final class JVMCheck {
-    /** no instantiation */
-    private JVMCheck() {}
-
-    /**
-     * URL with latest JVM recommendations
-     */
-    static final String JVM_RECOMMENDATIONS = "http://www.elastic.co/guide/en/elasticsearch/reference/current/_installation.html";
-
-    /**
-     * System property which if set causes us to bypass the check completely (but issues a warning in doing so)
-     */
-    static final String JVM_BYPASS = "es.bypass.vm.check";
-
-    /**
-     * Metadata and messaging for checking and reporting HotSpot
-     * issues.
-     */
-    interface HotSpotCheck {
-        /**
-         * If this HotSpot check should be executed.
-         *
-         * @return true if this HotSpot check should be executed
-         */
-        boolean check();
-
-        /**
-         * The error message to display when this HotSpot issue is
-         * present.
-         *
-         * @return the error message for this HotSpot issue
-         */
-        String getErrorMessage();
-
-        /**
-         * The warning message for this HotSpot issue if a workaround
-         * exists and is used.
-         *
-         * @return the warning message for this HotSpot issue
-         */
-        Optional<String> getWarningMessage();
-
-        /**
-         * The workaround for this HotSpot issue, if one exists.
-         *
-         * @return the workaround for this HotSpot issue, if one exists
-         */
-        Optional<String> getWorkaround();
-    }
-
-    /**
-     * Metadata and messaging for hotspot bugs.
-     */
-    static class HotspotBug implements HotSpotCheck {
-
-        /** OpenJDK bug URL */
-        final String bugUrl;
-
-        /** Compiler workaround flag (null if there is no workaround) */
-        final String workAround;
-
-        HotspotBug(String bugUrl, String workAround) {
-            this.bugUrl = bugUrl;
-            this.workAround = workAround;
-        }
-
-        /** Returns an error message to the user for a broken version */
-        public String getErrorMessage() {
-            StringBuilder sb = new StringBuilder();
-            sb.append("Java version: ").append(fullVersion());
-            sb.append(" suffers from critical bug ").append(bugUrl);
-            sb.append(" which can cause data corruption.");
-            sb.append(System.lineSeparator());
-            sb.append("Please upgrade the JVM, see ").append(JVM_RECOMMENDATIONS);
-            sb.append(" for current recommendations.");
-            if (workAround != null) {
-                sb.append(System.lineSeparator());
-                sb.append("If you absolutely cannot upgrade, please add ").append(workAround);
-                sb.append(" to the ES_JAVA_OPTS environment variable.");
-                sb.append(System.lineSeparator());
-                sb.append("Upgrading is preferred, this workaround will result in degraded performance.");
-            }
-            return sb.toString();
-        }
-
-        /** Warns the user when a workaround is being used to dodge the bug */
-        public Optional<String> getWarningMessage() {
-            StringBuilder sb = new StringBuilder();
-            sb.append("Workaround flag ").append(workAround);
-            sb.append(" for bug ").append(bugUrl);
-            sb.append(" found. ");
-            sb.append(System.lineSeparator());
-            sb.append("This will result in degraded performance!");
-            sb.append(System.lineSeparator());
-            sb.append("Upgrading is preferred, see ").append(JVM_RECOMMENDATIONS);
-            sb.append(" for current recommendations.");
-            return Optional.of(sb.toString());
-        }
-
-        public boolean check() {
-            return true;
-        }
-
-        @Override
-        public Optional<String> getWorkaround() {
-            return Optional.of(workAround);
-        }
-    }
-
-    static class G1GCCheck implements HotSpotCheck {
-        @Override
-        public boolean check() {
-            return JvmInfo.jvmInfo().useG1GC().equals("true");
-        }
-
-        /** Returns an error message to the user for a broken version */
-        public String getErrorMessage() {
-            StringBuilder sb = new StringBuilder();
-            sb.append("Java version: ").append(fullVersion());
-            sb.append(" can cause data corruption");
-            sb.append(" when used with G1GC.");
-            sb.append(System.lineSeparator());
-            sb.append("Please upgrade the JVM, see ").append(JVM_RECOMMENDATIONS);
-            sb.append(" for current recommendations.");
-            return sb.toString();
-        }
-
-        @Override
-        public Optional<String> getWarningMessage() {
-            return Optional.empty();
-        }
-
-        @Override
-        public Optional<String> getWorkaround() {
-            return Optional.empty();
-        }
-    }
-
-    /** mapping of hotspot version to hotspot bug information for the most serious bugs */
-    static final Map<String, HotSpotCheck> JVM_BROKEN_HOTSPOT_VERSIONS;
-
-    static {
-        Map<String, HotSpotCheck> bugs = new HashMap<>();
-
-        // 1.7.0: loop optimizer bug
-        bugs.put("21.0-b17", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-7070134", "-XX:-UseLoopPredicate"));
-        // register allocation issues (technically only x86/amd64). This impacted update 40, 45, and 51
-        bugs.put("24.0-b56", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord"));
-        bugs.put("24.45-b08", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord"));
-        bugs.put("24.51-b03", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord"));
-        G1GCCheck g1GcCheck = new G1GCCheck();
-        bugs.put("25.0-b70", g1GcCheck);
-        bugs.put("25.11-b03", g1GcCheck);
-        bugs.put("25.20-b23", g1GcCheck);
-        bugs.put("25.25-b02", g1GcCheck);
-        bugs.put("25.31-b07", g1GcCheck);
-
-        JVM_BROKEN_HOTSPOT_VERSIONS = Collections.unmodifiableMap(bugs);
-    }
-
-    /**
-     * Checks that the current JVM is "ok". This means it doesn't have severe bugs that cause data corruption.
-     */
-    static void check() {
-        if (Boolean.parseBoolean(System.getProperty(JVM_BYPASS))) {
-            Loggers.getLogger(JVMCheck.class).warn("bypassing jvm version check for version [{}], this can result in data corruption!", fullVersion());
-        } else if ("Oracle Corporation".equals(Constants.JVM_VENDOR)) {
-            HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
-            if (bug != null && bug.check()) {
-                if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
-                    Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
-                } else {
-                    throw new RuntimeException(bug.getErrorMessage());
-                }
-            }
-        } else if ("IBM Corporation".equals(Constants.JVM_VENDOR)) {
-            // currently some old JVM versions from IBM will easily result in index corruption.
-            // 2.8+ seems ok for ES from testing.
-            float version = Float.POSITIVE_INFINITY;
-            try {
-                version = Float.parseFloat(Constants.JVM_VERSION);
-            } catch (NumberFormatException ignored) {
-                // this is just a simple best-effort to detect old runtimes,
-                // if we cannot parse it, we don't fail.
-            }
-            if (version < 2.8f) {
-                StringBuilder sb = new StringBuilder();
-                sb.append("IBM J9 runtimes < 2.8 suffer from several bugs which can cause data corruption.");
-                sb.append(System.lineSeparator());
-                sb.append("Your version: " + fullVersion());
-                sb.append(System.lineSeparator());
-                sb.append("Please upgrade the JVM to a recent IBM JDK");
-                throw new RuntimeException(sb.toString());
-            }
-        }
-    }
-
-    /**
-     * Returns java + jvm version, looks like this:
-     * {@code Oracle Corporation 1.8.0_45 [Java HotSpot(TM) 64-Bit Server VM 25.45-b02]}
-     */
-    static String fullVersion() {
-        StringBuilder sb = new StringBuilder();
-        sb.append(Constants.JAVA_VENDOR);
-        sb.append(" ");
-        sb.append(Constants.JAVA_VERSION);
-        sb.append(" [");
-        sb.append(Constants.JVM_NAME);
-        sb.append(" ");
-        sb.append(Constants.JVM_VERSION);
-        sb.append("]");
-        return sb.toString();
-    }
-}
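Note: the deleted JVMCheck pinned the G1GC problem to an explicit list of broken HotSpot builds, while the replacement BootstrapCheck.G1GCCheck derives the answer from the version number itself. A one-method sketch of the difference (illustrative, not part of this commit):

    final class G1GCCoverageNote {
        // JVMCheck flagged only these exact G1GC-broken builds:
        // 25.0-b70, 25.11-b03, 25.20-b23, 25.25-b02, 25.31-b07.
        // The new check computes the predicate instead, covering all of them
        // plus every other HotSpot 25.x build before update 40:
        static boolean brokenWithG1GC(int major, int update) {
            return major == 25 && update < 40;
        }
    }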
@@ -29,6 +29,7 @@ import org.elasticsearch.monitor.Probes;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.OperatingSystemMXBean;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -120,10 +121,7 @@ public class OsProbe {
      *
      * On Windows, this method returns {@code null}.
      *
-     * On Linux, this method should return the 1, 5, and 15-minute load
-     * averages. If obtaining these values from {@code /proc/loadavg}
-     * fails, the method will fallback to obtaining the 1-minute load
-     * average.
+     * On Linux, this method returns the 1, 5, and 15-minute load averages.
      *
      * On macOS, this method should return the 1-minute load average.
      *
@@ -133,53 +131,44 @@ public class OsProbe {
         if (Constants.WINDOWS) {
             return null;
         } else if (Constants.LINUX) {
-            final String procLoadAvg = readProcLoadavg();
-            if (procLoadAvg != null) {
+            try {
+                final String procLoadAvg = readProcLoadavg();
                 assert procLoadAvg.matches("(\\d+\\.\\d+\\s+){3}\\d+/\\d+\\s+\\d+");
                 final String[] fields = procLoadAvg.split("\\s+");
-                try {
-                    return new double[]{Double.parseDouble(fields[0]), Double.parseDouble(fields[1]), Double.parseDouble(fields[2])};
-                } catch (final NumberFormatException e) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(String.format(Locale.ROOT, "error parsing /proc/loadavg [%s]", procLoadAvg), e);
-                    }
+                return new double[]{Double.parseDouble(fields[0]), Double.parseDouble(fields[1]), Double.parseDouble(fields[2])};
+            } catch (final IOException e) {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("error reading /proc/loadavg", e);
                 }
+                return null;
+            }
+        } else {
+            assert Constants.MAC_OS_X;
+            if (getSystemLoadAverage == null) {
+                return null;
+            }
+            try {
+                final double oneMinuteLoadAverage = (double) getSystemLoadAverage.invoke(osMxBean);
+                return new double[]{oneMinuteLoadAverage >= 0 ? oneMinuteLoadAverage : -1, -1, -1};
+            } catch (IllegalAccessException | InvocationTargetException e) {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("error reading one minute load average from operating system", e);
+                }
+                return null;
             }
-            // fallback
-        }
-
-        if (getSystemLoadAverage == null) {
-            return null;
-        }
-        try {
-            final double oneMinuteLoadAverage = (double) getSystemLoadAverage.invoke(osMxBean);
-            return new double[] { oneMinuteLoadAverage >= 0 ? oneMinuteLoadAverage : -1, -1, -1 };
-        } catch (final Exception e) {
-            logger.debug("error obtaining system load average", e);
-            return null;
         }
     }
 
     /**
-     * The line from {@code /proc/loadavg}. The first three fields are
-     * the load averages averaged over 1, 5, and 15 minutes. The fourth
-     * field is two numbers separated by a slash, the first is the
-     * number of currently runnable scheduling entities, the second is
-     * the number of scheduling entities on the system. The fifth field
-     * is the PID of the most recently created process.
+     * The line from {@code /proc/loadavg}. The first three fields are the load averages averaged over 1, 5, and 15 minutes. The fourth
+     * field is two numbers separated by a slash, the first is the number of currently runnable scheduling entities, the second is the
+     * number of scheduling entities on the system. The fifth field is the PID of the most recently created process.
      *
     * @return the line from {@code /proc/loadavg} or {@code null}
      */
     @SuppressForbidden(reason = "access /proc/loadavg")
-    String readProcLoadavg() {
-        try {
-            final List<String> lines = Files.readAllLines(PathUtils.get("/proc/loadavg"));
-            assert lines != null && lines.size() == 1;
-            return lines.get(0);
-        } catch (final IOException e) {
-            logger.debug("error reading /proc/loadavg", e);
-            return null;
-        }
+    String readProcLoadavg() throws IOException {
+        return readSingleLine(PathUtils.get("/proc/loadavg"));
     }
 
     public short getSystemCpuPercent() {
@@ -203,15 +192,11 @@ public class OsProbe {
     private static final Pattern CONTROL_GROUP_PATTERN = Pattern.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)");
 
     /**
-     * A map of the control groups to which the Elasticsearch process
-     * belongs. Note that this is a map because the control groups can
-     * vary from subsystem to subsystem. Additionally, this map can not
-     * be cached because a running process can be reclassified.
+     * A map of the control groups to which the Elasticsearch process belongs. Note that this is a map because the control groups can vary
+     * from subsystem to subsystem. Additionally, this map can not be cached because a running process can be reclassified.
      *
-     * @return a map from subsystems to the control group for the
-     * Elasticsearch process.
-     * @throws IOException if an I/O exception occurs reading
-     * {@code /proc/self/cgroup}
+     * @return a map from subsystems to the control group for the Elasticsearch process.
+     * @throws IOException if an I/O exception occurs reading {@code /proc/self/cgroup}
      */
     private Map<String, String> getControlGroups() throws IOException {
         final List<String> lines = readProcSelfCgroup();
@@ -234,21 +219,16 @@ public class OsProbe {
     }
 
     /**
-     * The lines from {@code /proc/self/cgroup}. This file represents
-     * the control groups to which the Elasticsearch process belongs.
-     * Each line in this file represents a control group hierarchy of
-     * the form
+     * The lines from {@code /proc/self/cgroup}. This file represents the control groups to which the Elasticsearch process belongs. Each
+     * line in this file represents a control group hierarchy of the form
      * <p>
     * {@code \d+:([^:,]+(?:,[^:,]+)?):(/.*)}
      * <p>
-     * with the first field representing the hierarchy ID, the second
-     * field representing a comma-separated list of the subsystems
-     * bound to the hierarchy, and the last field representing the
-     * control group.
+     * with the first field representing the hierarchy ID, the second field representing a comma-separated list of the subsystems bound to
+     * the hierarchy, and the last field representing the control group.
      *
      * @return the lines from {@code /proc/self/cgroup}
-     * @throws IOException if an I/O exception occurs reading
-     * {@code /proc/self/cgroup}
+     * @throws IOException if an I/O exception occurs reading {@code /proc/self/cgroup}
      */
     @SuppressForbidden(reason = "access /proc/self/cgroup")
     List<String> readProcSelfCgroup() throws IOException {
@@ -258,33 +238,24 @@ public class OsProbe {
     }
 
     /**
-     * The total CPU time in nanoseconds consumed by all tasks in the
-     * cgroup to which the Elasticsearch process belongs for the
-     * {@code cpuacct} subsystem.
+     * The total CPU time in nanoseconds consumed by all tasks in the cgroup to which the Elasticsearch process belongs for the {@code
+     * cpuacct} subsystem.
      *
-     * @param controlGroup the control group for the Elasticsearch
-     * process for the {@code cpuacct} subsystem
+     * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
     * @return the total CPU time in nanoseconds
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpuacct.usage} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group
      */
     private long getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException {
         return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup));
     }
 
     /**
-     * Returns the line from {@code cpuacct.usage} for the control
-     * group to which the Elasticsearch process belongs for the
-     * {@code cpuacct} subsystem. This line represents the total CPU
-     * time in nanoseconds consumed by all tasks in the same control
-     * group.
+     * Returns the line from {@code cpuacct.usage} for the control group to which the Elasticsearch process belongs for the {@code cpuacct}
+     * subsystem. This line represents the total CPU time in nanoseconds consumed by all tasks in the same control group.
      *
-     * @param controlGroup the control group to which the Elasticsearch
-     * process belongs for the {@code cpuacct}
-     * subsystem
+     * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpuacct} subsystem
     * @return the line from {@code cpuacct.usage}
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpuacct.usage} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group
      */
     @SuppressForbidden(reason = "access /sys/fs/cgroup/cpuacct")
     String readSysFsCgroupCpuAcctCpuAcctUsage(final String controlGroup) throws IOException {
@@ -292,33 +263,25 @@ public class OsProbe {
     }
 
     /**
-     * The total period of time in microseconds for how frequently the
-     * Elasticsearch control group's access to CPU resources will be
-     * reallocated.
+     * The total period of time in microseconds for how frequently the Elasticsearch control group's access to CPU resources will be
+     * reallocated.
      *
-     * @param controlGroup the control group for the Elasticsearch
-     * process for the {@code cpuacct} subsystem
+     * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
     * @return the CFS quota period in microseconds
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpu.cfs_period_us} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_period_us} for the control group
      */
     private long getCgroupCpuAcctCpuCfsPeriodMicros(final String controlGroup) throws IOException {
         return Long.parseLong(readSysFsCgroupCpuAcctCpuCfsPeriod(controlGroup));
     }
 
     /**
-     * Returns the line from {@code cpu.cfs_period_us} for the control
-     * group to which the Elasticsearch process belongs for the
-     * {@code cpu} subsystem. This line represents the period of time
-     * in microseconds for how frequently the control group's access to
-     * CPU resources will be reallocated.
+     * Returns the line from {@code cpu.cfs_period_us} for the control group to which the Elasticsearch process belongs for the {@code cpu}
+     * subsystem. This line represents the period of time in microseconds for how frequently the control group's access to CPU resources
+     * will be reallocated.
      *
-     * @param controlGroup the control group to which the Elasticsearch
-     * process belongs for the {@code cpu}
-     * subsystem
+     * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem
     * @return the line from {@code cpu.cfs_period_us}
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpu.cfs_period_us} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_period_us} for the control group
      */
     @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu")
     String readSysFsCgroupCpuAcctCpuCfsPeriod(final String controlGroup) throws IOException {
@@ -326,33 +289,25 @@ public class OsProbe {
     }
 
     /**
-     * The total time in microseconds that all tasks in the
-     * Elasticsearch control group can run during one period as
-     * specified by {@code cpu.cfs_period_us}.
+     * The total time in microseconds that all tasks in the Elasticsearch control group can run during one period as specified by {@code
+     * cpu.cfs_period_us}.
      *
-     * @param controlGroup the control group for the Elasticsearch
-     * process for the {@code cpuacct} subsystem
+     * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
     * @return the CFS quota in microseconds
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpu.cfs_quota_us} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_quota_us} for the control group
      */
     private long getCgroupCpuAcctCpuCfsQuotaMicros(final String controlGroup) throws IOException {
         return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctCfsQuota(controlGroup));
     }
 
     /**
-     * Returns the line from {@code cpu.cfs_quota_us} for the control
-     * group to which the Elasticsearch process belongs for the
-     * {@code cpu} subsystem. This line represents the total time in
-     * microseconds that all tasks in the control group can run during
-     * one period as specified by {@code cpu.cfs_period_us}.
+     * Returns the line from {@code cpu.cfs_quota_us} for the control group to which the Elasticsearch process belongs for the {@code cpu}
+     * subsystem. This line represents the total time in microseconds that all tasks in the control group can run during one period as
+     * specified by {@code cpu.cfs_period_us}.
      *
-     * @param controlGroup the control group to which the Elasticsearch
-     * process belongs for the {@code cpu}
-     * subsystem
+     * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem
     * @return the line from {@code cpu.cfs_quota_us}
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpu.cfs_quota_us} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpu.cfs_quota_us} for the control group
      */
     @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu")
     String readSysFsCgroupCpuAcctCpuAcctCfsQuota(final String controlGroup) throws IOException {
@@ -360,14 +315,11 @@ public class OsProbe {
     }
 
     /**
-     * The CPU time statistics for all tasks in the Elasticsearch
-     * control group.
+     * The CPU time statistics for all tasks in the Elasticsearch control group.
      *
-     * @param controlGroup the control group for the Elasticsearch
-     * process for the {@code cpuacct} subsystem
+     * @param controlGroup the control group for the Elasticsearch process for the {@code cpuacct} subsystem
     * @return the CPU time statistics
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpu.stat} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group
      */
     private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup) throws IOException {
         final List<String> lines = readSysFsCgroupCpuAcctCpuStat(controlGroup);
@@ -395,28 +347,20 @@ public class OsProbe {
     }
 
     /**
-     * Returns the lines from {@code cpu.stat} for the control
-     * group to which the Elasticsearch process belongs for the
-     * {@code cpu} subsystem. These lines represent the CPU time
-     * statistics and have the form
+     * Returns the lines from {@code cpu.stat} for the control group to which the Elasticsearch process belongs for the {@code cpu}
+     * subsystem. These lines represent the CPU time statistics and have the form
      * <blockquote><pre>
     * nr_periods \d+
     * nr_throttled \d+
     * throttled_time \d+
      * </pre></blockquote>
-     * where {@code nr_periods} is the number of period intervals
-     * as specified by {@code cpu.cfs_period_us} that have elapsed,
-     * {@code nr_throttled} is the number of times tasks in the given
-     * control group have been throttled, and {@code throttled_time} is
-     * the total time in nanoseconds for which tasks in the given
-     * control group have been throttled.
+     * where {@code nr_periods} is the number of period intervals as specified by {@code cpu.cfs_period_us} that have elapsed, {@code
+     * nr_throttled} is the number of times tasks in the given control group have been throttled, and {@code throttled_time} is the total
+     * time in nanoseconds for which tasks in the given control group have been throttled.
      *
-     * @param controlGroup the control group to which the Elasticsearch
-     * process belongs for the {@code cpu}
-     * subsystem
+     * @param controlGroup the control group to which the Elasticsearch process belongs for the {@code cpu} subsystem
     * @return the lines from {@code cpu.stat}
-     * @throws IOException if an I/O exception occurs reading
-     * {@code cpu.stat} for the control group
+     * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group
      */
     @SuppressForbidden(reason = "access /sys/fs/cgroup/cpu")
     List<String> readSysFsCgroupCpuAcctCpuStat(final String controlGroup) throws IOException {
@@ -426,11 +370,10 @@ public class OsProbe {
     }
 
     /**
-     * Checks if cgroup stats are available by checking for the existence of {@code /proc/self/cgroup},
-     * {@code /sys/fs/cgroup/cpu}, and {@code /sys/fs/cgroup/cpuacct}.
+     * Checks if cgroup stats are available by checking for the existence of {@code /proc/self/cgroup}, {@code /sys/fs/cgroup/cpu}, and
+     * {@code /sys/fs/cgroup/cpuacct}.
      *
-     * @return {@code true} if the stats are available, otherwise
-     * {@code false}
+     * @return {@code true} if the stats are available, otherwise {@code false}
      */
     @SuppressForbidden(reason = "access /proc/self/cgroup, /sys/fs/cgroup/cpu, and /sys/fs/cgroup/cpuacct")
     protected boolean areCgroupStatsAvailable() {
@@ -449,8 +392,7 @@ public class OsProbe {
     /**
     * Basic cgroup stats.
      *
-     * @return basic cgroup stats, or {@code null} if an I/O exception
-     * occurred reading the cgroup stats
+     * @return basic cgroup stats, or {@code null} if an I/O exception occurred reading the cgroup stats
      */
     private OsStats.Cgroup getCgroup() {
         try {
@@ -514,8 +456,7 @@ public class OsProbe {
     }
 
     /**
-     * Returns a given method of the OperatingSystemMXBean,
-     * or null if the method is not found or unavailable.
+     * Returns a given method of the OperatingSystemMXBean, or null if the method is not found or unavailable.
      */
     private static Method getMethod(String methodName) {
         try {
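Note: the Linux branch above assumes a /proc/loadavg line of the form "0.12 0.25 0.18 1/234 5678" (three load averages, runnable/total scheduling entities, most recent PID). A self-contained sketch of the same split-and-parse step (not part of this commit):

    final class LoadAvgDemo {
        public static void main(String[] args) {
            // Example /proc/loadavg line: 1-, 5-, and 15-minute averages,
            // then runnable/total scheduling entities, then the most recent PID.
            final String procLoadAvg = "0.12 0.25 0.18 1/234 5678";
            assert procLoadAvg.matches("(\\d+\\.\\d+\\s+){3}\\d+/\\d+\\s+\\d+");
            final String[] fields = procLoadAvg.split("\\s+");
            final double[] averages =
                {Double.parseDouble(fields[0]), Double.parseDouble(fields[1]), Double.parseDouble(fields[2])};
            System.out.printf("1m=%.2f 5m=%.2f 15m=%.2f%n", averages[0], averages[1], averages[2]);
        }
    }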
@@ -88,7 +88,7 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg
             }
         } else {
             for (int i = 0; i < percents.length; i++) {
-                int index = (int)((percents[i] / 100.0) * data.size());
+                int index = (int) Math.round((percents[i] / 100.0) * (data.size() - 1));
                 percentiles[i] = data.get(index);
             }
         }
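Note: the one-line change above fixes an off-by-one in the nearest-rank index: with the old formula, asking for the 100th percentile of a 5-element list computed index 5, one past the end of the list, while the new formula scales over data.size() - 1 so the top percentile lands on the last element. A minimal sketch of the two formulas side by side (not part of this commit):

    import java.util.Arrays;
    import java.util.List;

    final class PercentileIndexDemo {
        public static void main(String[] args) {
            final List<Double> data = Arrays.asList(1.0, 2.0, 3.0, 4.0, 5.0); // sorted values
            for (double percent : new double[] {0.0, 50.0, 100.0}) {
                int oldIndex = (int) ((percent / 100.0) * data.size());                  // 100.0 -> 5, out of bounds
                int newIndex = (int) Math.round((percent / 100.0) * (data.size() - 1));  // 100.0 -> 4, last element
                System.out.println(percent + "%: old=" + oldIndex + " new=" + newIndex);
            }
        }
    }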
@@ -33,6 +33,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
@@ -42,6 +43,7 @@ import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.Matchers.hasToString;
 import static org.hamcrest.Matchers.not;
 import static org.mockito.Mockito.mock;
@@ -530,6 +532,75 @@ public class BootstrapCheckTests extends ESTestCase {
             consumer.accept(e);
         }
 
+    public void testG1GCCheck() throws NodeValidationException {
+        final AtomicBoolean isG1GCEnabled = new AtomicBoolean(true);
+        final AtomicBoolean isJava8 = new AtomicBoolean(true);
+        final AtomicReference<String> jvmVersion =
+            new AtomicReference<>(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(0, 39), randomIntBetween(1, 128)));
+        final BootstrapCheck.G1GCCheck oracleCheck = new BootstrapCheck.G1GCCheck() {
+
+            @Override
+            String jvmVendor() {
+                return "Oracle Corporation";
+            }
+
+            @Override
+            boolean isG1GCEnabled() {
+                return isG1GCEnabled.get();
+            }
+
+            @Override
+            String jvmVersion() {
+                return jvmVersion.get();
+            }
+
+            @Override
+            boolean isJava8() {
+                return isJava8.get();
+            }
+
+        };
+
+        final NodeValidationException e =
+            expectThrows(
+                NodeValidationException.class,
+                () -> BootstrapCheck.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck"));
+        assertThat(
+            e.getMessage(),
+            containsString(
+                "JVM version [" + jvmVersion.get() + "] can cause data corruption when used with G1GC; upgrade to at least Java 8u40"));
+
+        // if G1GC is disabled, nothing should happen
+        isG1GCEnabled.set(false);
+        BootstrapCheck.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck");
+
+        // if on or after update 40, nothing should happen independent of whether or not G1GC is enabled
+        isG1GCEnabled.set(randomBoolean());
+        jvmVersion.set(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(40, 112), randomIntBetween(1, 128)));
+        BootstrapCheck.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck");
+
+        final BootstrapCheck.G1GCCheck nonOracleCheck = new BootstrapCheck.G1GCCheck() {
+
+            @Override
+            String jvmVendor() {
+                return randomAsciiOfLength(8);
+            }
+
+        };
+
+        // if not on an Oracle JVM, nothing should happen
+        BootstrapCheck.check(true, Collections.singletonList(nonOracleCheck), "testG1GCCheck");
+
+        final BootstrapCheck.G1GCCheck nonJava8Check = new BootstrapCheck.G1GCCheck() {
+            @Override
+            boolean isJava8() {
+                return false;
+            }
+        };
+        // if not java 8, nothing should happen
+        BootstrapCheck.check(true, Collections.singletonList(nonJava8Check), "testG1GCCheck");
+    }
+
     public void testAlwaysEnforcedChecks() {
         final BootstrapCheck.Check check = new BootstrapCheck.Check() {
             @Override
@ -34,6 +34,7 @@ import org.elasticsearch.test.ESIntegTestCase;
|
|||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.Iterator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||||
@ -51,7 +52,7 @@ import static org.hamcrest.core.IsNull.notNullValue;
|
|||||||
public class PercentilesBucketIT extends ESIntegTestCase {
|
public class PercentilesBucketIT extends ESIntegTestCase {
|
||||||
|
|
||||||
private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
|
private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
|
||||||
private static final double[] PERCENTS = {1.0, 25.0, 50.0, 75.0, 99.0};
|
private static final double[] PERCENTS = {0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0};
|
||||||
static int numDocs;
|
static int numDocs;
|
||||||
static int interval;
|
static int interval;
|
||||||
static int minRandomValue;
|
static int minRandomValue;
|
||||||
@ -123,11 +124,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket");
|
PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket");
|
||||||
assertThat(percentilesBucketValue, notNullValue());
|
assertThat(percentilesBucketValue, notNullValue());
|
||||||
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
||||||
for (Double p : PERCENTS) {
|
assertPercentileBucket(PERCENTS, values, percentilesBucketValue);
|
||||||
double expected = values[(int)((p / 100) * values.length)];
|
|
||||||
assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testDocCountAsSubAgg() throws Exception {
|
public void testDocCountAsSubAgg() throws Exception {
|
||||||
@ -174,10 +171,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket");
|
PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket");
|
||||||
assertThat(percentilesBucketValue, notNullValue());
|
assertThat(percentilesBucketValue, notNullValue());
|
||||||
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
||||||
for (Double p : PERCENTS) {
|
assertPercentileBucket(PERCENTS, values, percentilesBucketValue);
|
||||||
double expected = values[(int)((p / 100) * values.length)];
|
|
||||||
assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -212,10 +206,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket");
|
PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket");
|
||||||
assertThat(percentilesBucketValue, notNullValue());
|
assertThat(percentilesBucketValue, notNullValue());
|
||||||
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
||||||
for (Double p : PERCENTS) {
|
assertPercentileBucket(PERCENTS, values, percentilesBucketValue);
|
||||||
double expected = values[(int)((p / 100) * values.length)];
|
|
||||||
assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testMetricTopLevelDefaultPercents() throws Exception {
|
public void testMetricTopLevelDefaultPercents() throws Exception {
|
||||||
@ -248,11 +239,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket");
|
PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket");
|
||||||
assertThat(percentilesBucketValue, notNullValue());
|
assertThat(percentilesBucketValue, notNullValue());
|
||||||
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
||||||
for (Percentile p : percentilesBucketValue) {
|
assertPercentileBucket(values, percentilesBucketValue);
|
||||||
double expected = values[(int)((p.getPercent() / 100) * values.length)];
|
|
||||||
assertThat(percentilesBucketValue.percentile(p.getPercent()), equalTo(expected));
|
|
||||||
assertThat(p.getValue(), equalTo(expected));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testMetricAsSubAgg() throws Exception {
|
public void testMetricAsSubAgg() throws Exception {
|
||||||
@ -304,10 +291,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket");
|
PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket");
|
||||||
assertThat(percentilesBucketValue, notNullValue());
|
assertThat(percentilesBucketValue, notNullValue());
|
||||||
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
||||||
for (Double p : PERCENTS) {
|
assertPercentileBucket(PERCENTS, values.stream().mapToDouble(Double::doubleValue).toArray(), percentilesBucketValue);
|
||||||
double expected = values.get((int) ((p / 100) * values.size()));
|
|
||||||
assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -361,10 +345,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket");
|
PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket");
|
||||||
assertThat(percentilesBucketValue, notNullValue());
|
assertThat(percentilesBucketValue, notNullValue());
|
||||||
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket"));
|
||||||
for (Double p : PERCENTS) {
|
assertPercentileBucket(PERCENTS, values, percentilesBucketValue);
|
||||||
double expected = values[(int)((p / 100) * values.length)];
|
|
||||||
assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -489,7 +470,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
|
|||||||
.subAggregation(
|
.subAggregation(
|
||||||
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
|
||||||
.extendedBounds(minRandomValue, maxRandomValue))
|
.extendedBounds(minRandomValue, maxRandomValue))
|
||||||
.subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count")))
|
.subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").percents(PERCENTS)))
|
||||||
.addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket.50")
|
.addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket.50")
|
||||||
.percents(PERCENTS)).execute().actionGet();
|
.percents(PERCENTS)).execute().actionGet();
|
||||||
|
|
||||||
@@ -525,10 +506,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
     PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentile_histo_bucket");
     assertThat(percentilesBucketValue, notNullValue());
     assertThat(percentilesBucketValue.getName(), equalTo("percentile_histo_bucket"));
-    for (Double p : PERCENTS) {
-        double expected = innerValues[(int) ((p / 100) * innerValues.length)];
-        assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
-    }
+    assertPercentileBucket(PERCENTS, innerValues, percentilesBucketValue);
     values[i] = percentilesBucketValue.percentile(50.0);
     }

@@ -537,10 +515,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
     PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentile_terms_bucket");
     assertThat(percentilesBucketValue, notNullValue());
     assertThat(percentilesBucketValue.getName(), equalTo("percentile_terms_bucket"));
-    for (Double p : PERCENTS) {
-        double expected = values[(int) ((p / 100) * values.length)];
-        assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
-    }
+    assertPercentileBucket(PERCENTS, values, percentilesBucketValue);
 }

 public void testNestedWithDecimal() throws Exception {
@@ -591,10 +566,7 @@ public class PercentilesBucketIT extends ESIntegTestCase {
     PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentile_histo_bucket");
     assertThat(percentilesBucketValue, notNullValue());
     assertThat(percentilesBucketValue.getName(), equalTo("percentile_histo_bucket"));
-    for (Double p : percent) {
-        double expected = innerValues[(int) ((p / 100) * innerValues.length)];
-        assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
-    }
+    assertPercentileBucket(innerValues, percentilesBucketValue);
     values[i] = percentilesBucketValue.percentile(99.9);
     }

@@ -608,4 +580,22 @@ public class PercentilesBucketIT extends ESIntegTestCase {
             assertThat(percentilesBucketValue.percentile(p), equalTo(expected));
         }
     }
+
+    private void assertPercentileBucket(double[] values, PercentilesBucket percentiles) {
+        for (Percentile percentile : percentiles) {
+            assertEquals(percentiles.percentile(percentile.getPercent()), percentile.getValue(), 0d);
+            int index = (int) Math.round((percentile.getPercent() / 100.0) * (values.length - 1));
+            assertThat(percentile.getValue(), equalTo(values[index]));
+        }
+    }
+
+    private void assertPercentileBucket(double[] percents, double[] values, PercentilesBucket percentiles) {
+        Iterator<Percentile> it = percentiles.iterator();
+        for (int i = 0; i < percents.length; ++i) {
+            assertTrue(it.hasNext());
+            assertEquals(percents[i], it.next().getPercent(), 0d);
+        }
+        assertFalse(it.hasNext());
+        assertPercentileBucket(values, percentiles);
+    }
 }
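The two helpers above encode a nearest-rank lookup into the sorted values array: index = round((p / 100) * (n - 1)). A quick worked example with a made-up array (illustrative, not part of the commit):

[source,java]
----
public class NearestRankExample {
    public static void main(String[] args) {
        // Illustrative only: the index arithmetic used by assertPercentileBucket
        double[] sorted = { 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }; // sorted, length 10
        for (double p : new double[] { 50.0, 99.0 }) {
            int index = (int) Math.round((p / 100.0) * (sorted.length - 1));
            System.out.println(p + " -> " + sorted[index]); // 50.0 -> 60.0, 99.0 -> 100.0
        }
    }
}
----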
@@ -34,7 +34,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexStat
 import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
 import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
@@ -58,6 +57,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -77,7 +77,6 @@ import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.repositories.RepositoryData;
 import org.elasticsearch.repositories.RepositoryException;
 import org.elasticsearch.script.MockScriptEngine;
-import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.StoredScriptsIT;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -101,7 +100,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING;
-import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAliasesExist;
@@ -2505,8 +2503,28 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     }
     refresh();

+    // make sure we return only the in-progress snapshot when taking the first snapshot on a clean repository
+    // take initial snapshot with a block, making sure we only get 1 in-progress snapshot returned
+    // block a node so the create snapshot operation can remain in progress
+    final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName);
+    ListenableActionFuture<CreateSnapshotResponse> responseListener =
+        client.admin().cluster().prepareCreateSnapshot(repositoryName, "snap-on-empty-repo")
+            .setWaitForCompletion(false)
+            .setIndices(indexName)
+            .execute();
+    waitForBlock(initialBlockedNode, repositoryName, TimeValue.timeValueSeconds(60)); // wait for block to kick in
+    getSnapshotsResponse = client.admin().cluster()
+                               .prepareGetSnapshots("test-repo")
+                               .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo"))
+                               .get();
+    assertEquals(1, getSnapshotsResponse.getSnapshots().size());
+    assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName());
+    unblockNode(repositoryName, initialBlockedNode); // unblock node
+    responseListener.actionGet(TimeValue.timeValueMillis(10000L)); // timeout after 10 seconds
+    client.admin().cluster().prepareDeleteSnapshot(repositoryName, "snap-on-empty-repo").get();
+
     final int numSnapshots = randomIntBetween(1, 3) + 1;
-    logger.info("--> take {} snapshot(s)", numSnapshots);
+    logger.info("--> take {} snapshot(s)", numSnapshots - 1);
     final String[] snapshotNames = new String[numSnapshots];
     for (int i = 0; i < numSnapshots - 1; i++) {
         final String snapshotName = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
@@ -2538,9 +2556,19 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> get all snapshots with a current in-progress");
     // with ignore unavailable set to true, should not throw an exception
+    final List<String> snapshotsToGet = new ArrayList<>();
+    if (randomBoolean()) {
+        // use _current plus the individual names of the finished snapshots
+        snapshotsToGet.add("_current");
+        for (int i = 0; i < numSnapshots - 1; i++) {
+            snapshotsToGet.add(snapshotNames[i]);
+        }
+    } else {
+        snapshotsToGet.add("_all");
+    }
     getSnapshotsResponse = client.admin().cluster()
                                .prepareGetSnapshots(repositoryName)
-                               .addSnapshots("_all")
+                               .setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY))
                                .get();
     List<String> sortedNames = Arrays.asList(snapshotNames);
     Collections.sort(sortedNames);
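The randomized list above exercises `setSnapshots` with either `_all` or `_current` plus concrete names. A minimal sketch of that request surface, assuming the `client` handle from the surrounding integration-test scaffolding and hypothetical repository and snapshot names:

[source,java]
----
// Sketch, not from the commit: snapshots can be requested by name, wildcard,
// or the "_all" / "_current" aliases; ignoreUnavailable skips missing names.
GetSnapshotsResponse resp = client.admin().cluster()
        .prepareGetSnapshots("my-repo")                  // hypothetical repository
        .setSnapshots("_current", "nightly-*", "snap-1") // hypothetical names
        .setIgnoreUnavailable(true)
        .get();
for (SnapshotInfo info : resp.getSnapshots()) {
    System.out.println(info.snapshotId().getName());
}
----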
@@ -39,10 +39,6 @@ buildscript {
   }
 }

-// this is common configuration for distributions, but we also add it here for the license check to use
-ext.dependencyFiles = project(':core').configurations.runtime.copyRecursive()
-
-
 /*****************************************************************************
  *                                  Modules                                  *
  *****************************************************************************/
@@ -146,7 +142,7 @@ subprojects {
   libFiles = copySpec {
     into 'lib'
     from project(':core').jar
-    from project(':distribution').dependencyFiles
+    from project(':core').configurations.runtime
   }

   modulesFiles = copySpec {
@@ -438,19 +434,6 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) {
   }
 }

-// TODO: dependency checks should really be when building the jar itself, which would remove the need
-// for this hackery and instead we can do this inside the BuildPlugin
-task dependencyLicenses(type: DependencyLicensesTask) {
-  dependsOn = [dependencyFiles]
-  dependencies = dependencyFiles
-  mapping from: /lucene-.*/, to: 'lucene'
-  mapping from: /jackson-.*/, to: 'jackson'
-}
-task check(group: 'Verification', description: 'Runs all checks.', dependsOn: dependencyLicenses) {} // dummy task!
-task updateShas(type: UpdateShasTask) {
-  parentTask = dependencyLicenses
-}
-
 task run(type: RunTask) {
   distribution = 'zip'
 }
@@ -137,7 +137,7 @@ case "$1" in
     fi

     # Start Daemon
-    start-stop-daemon -d $ES_HOME --start -b --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
+    start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
     return=$?
     if [ $return -eq 0 ]; then
         i=0
@@ -35,6 +35,6 @@ public class DebClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return createParameters(0, 1);
+        return createParameters();
     }
 }
@@ -35,6 +35,6 @@ public class IntegTestZipClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return createParameters(0, 1);
+        return createParameters();
     }
 }
@@ -18,7 +18,7 @@
  */

 task buildRpm(type: Rpm) {
-  dependsOn dependencyFiles, preparePackagingFiles
+  dependsOn preparePackagingFiles
   baseName 'elasticsearch' // this is what pom generation uses for artifactId
   // Follow elasticsearch's rpm file naming convention
   archiveName "${packageName}-${project.version}.rpm"
@@ -35,6 +35,6 @@ public class RpmClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return createParameters(0, 1);
+        return createParameters();
     }
 }
@@ -35,6 +35,6 @@ public class TarClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return createParameters(0, 1);
+        return createParameters();
     }
 }
@@ -35,6 +35,6 @@ public class ZipClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return createParameters(0, 1);
+        return createParameters();
     }
 }
@@ -56,7 +56,7 @@ cloud:
         type: pkcs12

 discovery:
-    type: azure
+    zen.hosts_provider: azure
 ----

 [IMPORTANT]
@@ -139,7 +139,7 @@ environments). Here is a simple sample configuration:
 [source,yaml]
 ----
 discovery:
-    type: ec2
+    zen.hosts_provider: ec2
 ----

 You must also set `cloud.aws.region` if you are not using default AWS region. See <<discovery-ec2-usage-region>> for details.
@@ -39,11 +39,15 @@ The node must be stopped before removing the plugin.

 The file-based discovery plugin provides the ability to specify the
 unicast hosts list through a simple `unicast_hosts.txt` file that can
-be dynamically updated at any time. The discovery type for this plugin
-is still the default `zen` plugin, so no changes are required to the
-`elasticsearch.yml` config file. This plugin simply provides a facility
-to supply the unicast hosts list for zen discovery through an external
-file that can be updated at any time by a side process.
+be dynamically updated at any time. To enable, add the following in `elasticsearch.yml`:
+
+[source,yaml]
+----
+discovery.zen.hosts_provider: file
+----
+
+This plugin simply provides a facility to supply the unicast hosts list for
+zen discovery through an external file that can be updated at any time by a side process.

 For example, this gives a convenient mechanism for an Elasticsearch instance
 that is run in docker containers to be dynamically supplied a list of IP
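Since the changed docs show only the `hosts_provider` switch, a sample `unicast_hosts.txt` may help: the file takes one host per line, with an optional transport port after a colon. The addresses below are placeholders, not from the docs being diffed:

[source,txt]
----
# one unicast host per line; the transport port is assumed if omitted
10.10.10.5
10.10.10.6:9305
seeds.mycluster.internal:9300
----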
@@ -46,7 +46,7 @@ cloud:
         project_id: <your-google-project-id>
         zone: <your-zone>
 discovery:
-    type: gce
+    zen.hosts_provider: gce
 --------------------------------------------------

 The following gce settings (prefixed with `cloud.gce`) are supported:
@@ -1,5 +1,5 @@
 [[search-aggregations-bucket-diversified-sampler-aggregation]]
-=== Sampler Aggregation
+=== Diversified Sampler Aggregation

 experimental[]

@@ -24,7 +24,7 @@ Example:
     },
     "aggs": {
         "sample": {
-            "sampler": {
+            "diversified_sampler": {
                 "shard_size": 200,
                 "field" : "user.id"
             },
@@ -65,7 +65,7 @@ POST /sales/_search

 <1> `buckets_path` instructs this percentiles_bucket aggregation that we want to calculate percentiles for
 the `sales` aggregation in the `sales_per_month` date histogram.
-<2> `percents` specifies which percentiles we wish to calculate, in this case, the 25th, 50th and 75th percentil
+<2> `percents` specifies which percentiles we wish to calculate, in this case, the 25th, 50th and 75th percentiles.

 And the following may be the response:
@@ -107,7 +107,7 @@ And the following may be the response:
       },
       "percentiles_monthly_sales": {
         "values" : {
-          "25.0": 60.0,
+          "25.0": 375.0,
           "50.0": 375.0,
           "75.0": 550.0
         }
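The corrected `25.0` value is consistent with the nearest-rank rule used elsewhere in this commit: the three monthly `sales` totals in the example sort to [60, 375, 550], and the selected index is `round((p / 100) * (n - 1))`. A quick check of the arithmetic (illustrative code, not from the docs):

[source,java]
----
public class PercentilesBucketDocCheck {
    public static void main(String[] args) {
        double[] sorted = { 60.0, 375.0, 550.0 }; // the three bucket values, sorted
        for (double p : new double[] { 25.0, 50.0, 75.0 }) {
            int index = (int) Math.round((p / 100.0) * (sorted.length - 1));
            // prints: 25.0 -> 375.0, 50.0 -> 375.0, 75.0 -> 550.0
            System.out.println(p + " -> " + sorted[index]);
        }
    }
}
----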
|
@ -161,3 +161,11 @@ enabled. This check is always enforced. To pass this check do not enable
|
|||||||
use the JVM flag `ExitOnOutOfMemoryError`. While this does not have the
|
use the JVM flag `ExitOnOutOfMemoryError`. While this does not have the
|
||||||
full capabilities of `OnError` nor `OnOutOfMemoryError`, arbitrary
|
full capabilities of `OnError` nor `OnOutOfMemoryError`, arbitrary
|
||||||
forking will not be supported with seccomp enabled.
|
forking will not be supported with seccomp enabled.
|
||||||
|
|
||||||
|
=== G1GC check
|
||||||
|
|
||||||
|
Early versions of the HotSpot JVM that shipped with JDK 8 are known to have
|
||||||
|
issues that can lead to index corruption when the G1GC collector is enabled.
|
||||||
|
The versions impacted are those earlier than the version of HotSpot that
|
||||||
|
shipped with JDK 8u40. The G1GC check detects these early versions of the
|
||||||
|
HotSpot JVM.
|
||||||
|
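The check itself is only described in prose here. As a hedged illustration of how such a detection can be written, assuming HotSpot's `java.vm.version` convention in which 25.40 corresponds to JDK 8u40 (my sketch, not the actual BootstrapCheck implementation):

[source,java]
----
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class G1GCCheckSketch {
    public static void main(String[] args) {
        // JDK 8 HotSpot reports java.vm.version like "25.40-b25"
        String vmName = System.getProperty("java.vm.name", "");
        String vmVersion = System.getProperty("java.vm.version", "");
        // G1 is in use when a collector bean is named "G1 Young/Old Generation"
        boolean g1InUse = ManagementFactory.getGarbageCollectorMXBeans().stream()
                .map(GarbageCollectorMXBean::getName)
                .anyMatch(name -> name.startsWith("G1"));
        Matcher m = Pattern.compile("^25\\.(\\d+)").matcher(vmVersion);
        if (vmName.contains("HotSpot") && g1InUse && m.find()
                && Integer.parseInt(m.group(1)) < 40) {
            throw new IllegalStateException(
                    "G1GC on HotSpot earlier than JDK 8u40 is known to corrupt indices");
        }
    }
}
----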
@@ -184,6 +184,3 @@ discovery.zen.minimum_master_nodes: 2
 IMPORTANT: If `discovery.zen.minimum_master_nodes` is not set when
 Elasticsearch is running in <<dev-vs-prod,production mode>>, an exception will
 be thrown which will prevent the node from starting.
-
-[float]
-[[node.max_local_storage_nodes]]
@@ -37,7 +37,7 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }

     @Override
@@ -34,6 +34,6 @@ public class MatrixStatsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }
 }
@@ -36,7 +36,7 @@ public class IngestCommonClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }
 }

@@ -36,7 +36,7 @@ public class LangExpressionClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }
 }

@@ -36,7 +36,7 @@ public class LangGroovyClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }
 }

@@ -36,7 +36,7 @@ public class LangMustacheClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }
 }

@@ -37,7 +37,7 @@ public class LangPainlessClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase

     @ParametersFactory
     public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
-        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+        return ESClientYamlSuiteTestCase.createParameters();
     }
 }

Some files were not shown because too many files have changed in this diff.