merge from master
commit 27eab74510
@@ -213,7 +213,7 @@ public abstract class TransportWriteAction<
* callback used by {@link AsyncAfterWriteAction} to notify that all post
* process actions have been executed
*/
- private interface RespondingWriteResult {
+ interface RespondingWriteResult {
/**
* Called on successful processing of all post write actions
* @param forcedRefresh <code>true</code> iff this write has caused a refresh
@@ -66,7 +66,6 @@ final class BootstrapCheck {
static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
check(
enforceLimits(boundTransportAddress),
- BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS.get(settings),
checks(settings),
Node.NODE_NAME_SETTING.get(settings));
}

@@ -77,18 +76,15 @@ final class BootstrapCheck {
*
* @param enforceLimits true if the checks should be enforced or
* otherwise warned
- * @param ignoreSystemChecks true if system checks should be enforced
- * or otherwise warned
* @param checks the checks to execute
* @param nodeName the node name to be used as a logging prefix
*/
// visible for testing
static void check(
final boolean enforceLimits,
- final boolean ignoreSystemChecks,
final List<Check> checks,
final String nodeName) throws NodeValidationException {
- check(enforceLimits, ignoreSystemChecks, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
+ check(enforceLimits, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
}

/**

@@ -97,14 +93,11 @@ final class BootstrapCheck {
*
* @param enforceLimits true if the checks should be enforced or
* otherwise warned
- * @param ignoreSystemChecks true if system checks should be enforced
- * or otherwise warned
* @param checks the checks to execute
* @param logger the logger to
*/
static void check(
final boolean enforceLimits,
- final boolean ignoreSystemChecks,
final List<Check> checks,
final Logger logger) throws NodeValidationException {
final List<String> errors = new ArrayList<>();

@@ -113,13 +106,10 @@ final class BootstrapCheck {
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
- if (enforceLimits && ignoreSystemChecks) {
- logger.warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
- }

for (final Check check : checks) {
if (check.check()) {
- if ((!enforceLimits || (check.isSystemCheck() && ignoreSystemChecks)) && !check.alwaysEnforce()) {
+ if (!enforceLimits && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());

@@ -201,14 +191,6 @@ final class BootstrapCheck {
*/
String errorMessage();

- /**
- * test if the check is a system-level check
- *
- * @return true if the check is a system-level check as opposed
- * to an Elasticsearch-level check
- */
- boolean isSystemCheck();
-
default boolean alwaysEnforce() {
return false;
}
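With isSystemCheck() gone, a check only reports whether it fails and with what message, plus an optional alwaysEnforce() for failures that must abort startup even when the checks are not otherwise enforced. A minimal sketch of a check written against the trimmed interface; the class name and heap threshold are made up for illustration:

    // Hypothetical example, not part of this commit.
    static class MinHeapCheck implements BootstrapCheck.Check {

        @Override
        public boolean check() {
            // returning true means the check failed, as in the implementations below
            return JvmInfo.jvmInfo().getConfiguredMaxHeapSize() < 256 * 1024 * 1024;
        }

        @Override
        public String errorMessage() {
            return "configured heap size is below 256mb";
        }

        @Override
        public boolean alwaysEnforce() {
            // defaults to false; true makes the failure fatal even outside enforcement
            return true;
        }
    }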
@@ -245,11 +227,6 @@ final class BootstrapCheck {
return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
}

- @Override
- public final boolean isSystemCheck() {
- return false;
- }
-
}

static class OsXFileDescriptorCheck extends FileDescriptorCheck {

@@ -299,11 +276,6 @@ final class BootstrapCheck {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}

- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}

static class MlockallCheck implements Check {

@@ -329,11 +301,6 @@ final class BootstrapCheck {
return Natives.isMemoryLocked();
}

- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}

static class MaxNumberOfThreadsCheck implements Check {

@@ -360,11 +327,6 @@ final class BootstrapCheck {
return JNANatives.MAX_NUMBER_OF_THREADS;
}

- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}

static class MaxSizeVirtualMemoryCheck implements Check {

@@ -393,11 +355,6 @@ final class BootstrapCheck {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
}

- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}

static class MaxMapCountCheck implements Check {

@@ -465,11 +422,6 @@ final class BootstrapCheck {
return Long.parseLong(procSysVmMaxMapCount);
}

- @Override
- public final boolean isSystemCheck() {
- return true;
- }
-
}

static class ClientJvmCheck implements BootstrapCheck.Check {

@@ -492,11 +444,6 @@ final class BootstrapCheck {
getVmName());
}

- @Override
- public final boolean isSystemCheck() {
- return false;
- }
-
}

/**

@@ -524,11 +471,6 @@ final class BootstrapCheck {
JvmInfo.jvmInfo().getVmName());
}

- @Override
- public boolean isSystemCheck() {
- return false;
- }
-
}

abstract static class MightForkCheck implements BootstrapCheck.Check {

@@ -546,11 +488,6 @@ final class BootstrapCheck {
// visible for testing
abstract boolean mightFork();

- @Override
- public final boolean isSystemCheck() {
- return false;
- }
-
@Override
public final boolean alwaysEnforce() {
return true;
@@ -37,7 +37,5 @@ public final class BootstrapSettings {
Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
public static final Setting<Boolean> CTRLHANDLER_SETTING =
Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
- public static final Setting<Boolean> IGNORE_SYSTEM_BOOTSTRAP_CHECKS =
- Setting.boolSetting("bootstrap.ignore_system_bootstrap_checks", false, Property.NodeScope);

}
@@ -157,10 +157,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}

+ static Setting<Integer> buildNumberOfShardsSetting() {
+ /* This is a safety limit that should only be exceeded in very rare and special cases. The assumption is that
+ * 99% of the users have less than 1024 shards per index. We also make it a hard check that requires restart of nodes
+ * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards per cluster.
+ * this also prevents creating stuff like a new index with millions of shards by accident which essentially kills the entire cluster
+ * with OOM on the spot.*/
+ final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024"));
+ if (maxNumShards < 1) {
+ throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0");
+ }
+ return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards,
+ Property.IndexScope);
+ }
+
public static final String INDEX_SETTING_PREFIX = "index.";
public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
- public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING =
- Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope);
+ public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = buildNumberOfShardsSetting();
public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING =
Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope);
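The cap is enforced through Setting.intSetting's maximum bound, and the default of 5 survives as long as the cap allows it. A minimal sketch of the expected behaviour with the es.index.max_number_of_shards property left unset (the wrapper class and values are illustrative only; the error message is the one asserted in EvilSystemPropertyTests below):

    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.common.settings.Settings;

    class ShardLimitSketch {
        static void demo() {
            // no explicit value: the clamped default applies
            int byDefault = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(Settings.EMPTY); // 5

            // values within the bound parse normally
            int explicit = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(
                Settings.builder().put("index.number_of_shards", 100).build()); // 100

            // values above the bound are rejected by the setting's max check:
            // "Failed to parse value [1025] for setting [index.number_of_shards] must be <= 1024"
            IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(
                Settings.builder().put("index.number_of_shards", 1025).build());
        }
    }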
@@ -397,7 +397,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
BootstrapSettings.MEMORY_LOCK_SETTING,
BootstrapSettings.SECCOMP_SETTING,
BootstrapSettings.CTRLHANDLER_SETTING,
- BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS,
IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.MIN_INDEX_BUFFER_SIZE_SETTING,
IndexingMemoryController.MAX_INDEX_BUFFER_SIZE_SETTING,
@@ -636,10 +636,6 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, (s) -> defaultPercentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
}

- public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
- return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
- }
-
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser,
Property... properties) {
return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties);

@@ -795,9 +791,9 @@ public class Setting<T> extends ToXContentToBytes {
};
}

- public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue,
+ public static Setting<TimeValue> timeSetting(String key, Function<Settings, TimeValue> defaultValue, TimeValue minValue,
Property... properties) {
- return new Setting<>(key, defaultValue, (s) -> {
+ return new Setting<>(key, (s) -> defaultValue.apply(s).getStringRep(), (s) -> {
TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
if (timeValue.millis() < minValue.millis()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);

@@ -807,17 +803,21 @@ public class Setting<T> extends ToXContentToBytes {
}

public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) {
- return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties);
+ return timeSetting(key, (s) -> defaultValue, minValue, properties);
}

public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, Property... properties) {
- return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties);
+ return new Setting<>(key, (s) -> defaultValue.getStringRep(), (s) -> TimeValue.parseTimeValue(s, key), properties);
}

public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, Property... properties) {
return new Setting<>(key, fallbackSetting, (s) -> TimeValue.parseTimeValue(s, key), properties);
}

+ public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
+ return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
+ }
+
public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, Property... properties) {
return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
final double d = Double.parseDouble(s);
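Derived time defaults are now expressed directly as TimeValue functions and rendered through getStringRep(), so a computed default no longer round-trips through TimeValue.toString(). A sketch of a setting declared against the new signature, modelled on the ZenDiscovery change below; the setting name and multiplier are made up for illustration:

    // Illustrative only: "example.long_timeout" is not a real setting.
    public static final Setting<TimeValue> LONG_TIMEOUT_SETTING =
        Setting.timeSetting("example.long_timeout",
            // the default is computed from another resolved TimeValue, no toString() involved
            settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20),
            TimeValue.timeValueMillis(0), Property.NodeScope);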
@@ -249,6 +249,12 @@ public class TimeValue implements Writeable {
return PeriodFormat.getDefault().withParseType(type).print(period);
}

+ /**
+ * Returns a {@link String} representation of the current {@link TimeValue}.
+ *
+ * Note that this method might produce fractional time values (ex 1.6m) which cannot be
+ * parsed by method like {@link TimeValue#parse(String, String, int)}.
+ */
@Override
public String toString() {
if (duration < 0) {
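The distinction the new javadoc draws is why the setting factories above switched to getStringRep(): toString() prefers a short fractional form, while getStringRep() keeps a value the parser can read back. A small illustration; the exact output strings are my assumption about the current formatting, not something asserted by this commit:

    TimeValue t = TimeValue.timeValueMillis(96000);

    String pretty = t.toString();      // expected "1.6m", readable but not parseable by parseTimeValue
    String exact = t.getStringRep();   // expected "96000ms", round-trips through TimeValue.parseTimeValue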
@@ -89,7 +89,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope);
public static final Setting<TimeValue> JOIN_TIMEOUT_SETTING =
Setting.timeSetting("discovery.zen.join_timeout",
- settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(),
+ settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20),
TimeValue.timeValueMillis(0), Property.NodeScope);
public static final Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING =
Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, Property.NodeScope);

@@ -101,7 +101,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope);
public static final Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING =
Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
- settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0),
+ settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2), TimeValue.timeValueMillis(0),
Property.NodeScope);
public static final Setting<Boolean> MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING =
Setting.boolSetting("discovery.zen.master_election.ignore_non_master_pings", false, Property.NodeScope);
@@ -61,7 +61,7 @@ public class RecoverySettings extends AbstractComponent {
*/
public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING =
Setting.timeSetting("indices.recovery.internal_action_long_timeout",
- (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(),
+ (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2),
TimeValue.timeValueSeconds(0), Property.Dynamic, Property.NodeScope);

/**

@@ -70,7 +70,7 @@ public class RecoverySettings extends AbstractComponent {
*/
public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING =
Setting.timeSetting("indices.recovery.recovery_activity_timeout",
- (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0),
+ INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING::get, TimeValue.timeValueSeconds(0),
Property.Dynamic, Property.NodeScope);

public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);
@@ -224,7 +224,13 @@ public final class ConfigurationUtils {

public static ElasticsearchException newConfigurationException(String processorType, String processorTag,
String propertyName, String reason) {
- ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason);
+ String msg;
+ if (propertyName == null) {
+ msg = reason;
+ } else {
+ msg = "[" + propertyName + "] " + reason;
+ }
+ ElasticsearchParseException exception = new ElasticsearchParseException(msg);
addHeadersToException(exception, processorType, processorTag, propertyName);
return exception;
}
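The null guard only changes the message prefix when no property name is supplied, matching the updated ScriptProcessorFactoryTests expectations further down. A hedged sketch of the two message shapes (the processor type, tag and property name are made-up values):

    // Illustrative only.
    ElasticsearchException withProperty =
        ConfigurationUtils.newConfigurationException("script", "my_tag", "source", "property cannot be empty");
    // message reads "[source] property cannot be empty"

    ElasticsearchException withoutProperty =
        ConfigurationUtils.newConfigurationException("script", "my_tag", null, "Only one of [file], [id], or [inline] may be configured");
    // message is just the reason, with no leading "[null]"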
@@ -0,0 +1,55 @@
+ /*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+ package org.elasticsearch.action.support.replication;
+
+ import org.apache.logging.log4j.Logger;
+ import org.elasticsearch.action.support.WriteRequest;
+ import org.elasticsearch.common.Nullable;
+ import org.elasticsearch.index.shard.IndexShard;
+ import org.elasticsearch.index.translog.Translog;
+
+ import java.util.concurrent.CountDownLatch;
+
+ public abstract class TransportWriteActionTestHelper {
+
+
+ public static void performPostWriteActions(final IndexShard indexShard,
+ final WriteRequest<?> request,
+ @Nullable final Translog.Location location,
+ final Logger logger) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ TransportWriteAction.RespondingWriteResult writerResult = new TransportWriteAction.RespondingWriteResult() {
+ @Override
+ public void onSuccess(boolean forcedRefresh) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Exception ex) {
+ throw new AssertionError(ex);
+ }
+ };
+ new TransportWriteAction.AsyncAfterWriteAction(indexShard, request, location, writerResult, logger).run();
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ }
+ }
@@ -70,18 +70,14 @@ public class BootstrapCheckTests extends ESTestCase {

public void testNoLogMessageInNonProductionMode() throws NodeValidationException {
final Logger logger = mock(Logger.class);
- BootstrapCheck.check(false, randomBoolean(), Collections.emptyList(), logger);
+ BootstrapCheck.check(false, Collections.emptyList(), logger);
verifyNoMoreInteractions(logger);
}

public void testLogMessageInProductionMode() throws NodeValidationException {
final Logger logger = mock(Logger.class);
- final boolean ignoreSystemChecks = randomBoolean();
- BootstrapCheck.check(true, ignoreSystemChecks, Collections.emptyList(), logger);
+ BootstrapCheck.check(true, Collections.emptyList(), logger);
verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
- if (ignoreSystemChecks) {
- verify(logger).warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
- }
verifyNoMoreInteractions(logger);
}

@@ -139,11 +135,6 @@ public class BootstrapCheckTests extends ESTestCase {
public String errorMessage() {
return "first";
}
-
- @Override
- public boolean isSystemCheck() {
- return false;
- }
},
new BootstrapCheck.Check() {
@Override

@@ -155,16 +146,11 @@ public class BootstrapCheckTests extends ESTestCase {
public String errorMessage() {
return "second";
}
-
- @Override
- public boolean isSystemCheck() {
- return false;
- }
}
);

final NodeValidationException e =
- expectThrows(NodeValidationException.class, () -> BootstrapCheck.check(true, false, checks, "testExceptionAggregation"));
+ expectThrows(NodeValidationException.class, () -> BootstrapCheck.check(true, checks, "testExceptionAggregation"));
assertThat(e, hasToString(allOf(containsString("bootstrap checks failed"), containsString("first"), containsString("second"))));
final Throwable[] suppressed = e.getSuppressed();
assertThat(suppressed.length, equalTo(2));

@@ -195,7 +181,7 @@ public class BootstrapCheckTests extends ESTestCase {
final NodeValidationException e =
expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck"));
assertThat(
e.getMessage(),
containsString("initial heap size [" + initialHeapSize.get() + "] " +

@@ -203,7 +189,7 @@ public class BootstrapCheckTests extends ESTestCase {

initialHeapSize.set(maxHeapSize.get());

- BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck");

// nothing should happen if the initial heap size or the max
// heap size is not available

@@ -212,7 +198,7 @@ public class BootstrapCheckTests extends ESTestCase {
} else {
maxHeapSize.set(0);
}
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck");
}

public void testFileDescriptorLimits() throws NodeValidationException {

@@ -238,17 +224,17 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e =
expectThrows(NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits"));
assertThat(e.getMessage(), containsString("max file descriptors"));

maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

- BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits");

// nothing should happen if current file descriptor count is
// not available
maxFileDescriptorCount.set(-1);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits");
}

public void testFileDescriptorLimitsThrowsOnInvalidLimit() {

@@ -293,7 +279,6 @@ public class BootstrapCheckTests extends ESTestCase {
NodeValidationException.class,
() -> BootstrapCheck.check(
true,
- false,
Collections.singletonList(check),
"testFileDescriptorLimitsThrowsOnInvalidLimit"));
assertThat(

@@ -301,7 +286,7 @@ public class BootstrapCheckTests extends ESTestCase {
containsString("memory locking requested for elasticsearch process but memory is not locked"));
} else {
// nothing should happen
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit");
}
}
}

@@ -318,17 +303,17 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
assertThat(e.getMessage(), containsString("max number of threads"));

maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");

// nothing should happen if current max number of threads is
// not available
maxNumberOfThreads.set(-1);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
}

public void testMaxSizeVirtualMemory() throws NodeValidationException {

@@ -349,17 +334,17 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
assertThat(e.getMessage(), containsString("max size virtual memory"));

maxSizeVirtualMemory.set(rlimInfinity);

- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory");

// nothing should happen if max size virtual memory is not
// available
maxSizeVirtualMemory.set(Long.MIN_VALUE);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
}

public void testMaxMapCountCheck() throws NodeValidationException {

@@ -374,17 +359,17 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck"));
assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count"));

maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck");

// nothing should happen if current vm.max_map_count is not
// available
maxMapCount.set(-1);
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck");
}

public void testClientJvmCheck() throws NodeValidationException {

@@ -398,14 +383,14 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testClientJvmCheck"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testClientJvmCheck"));
assertThat(
e.getMessage(),
containsString("JVM is using the client VM [Java HotSpot(TM) 32-Bit Client VM] " +
"but should be using a server VM for the best performance"));

vmName.set("Java HotSpot(TM) 32-Bit Server VM");
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testClientJvmCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testClientJvmCheck");
}

public void testUseSerialGCCheck() throws NodeValidationException {

@@ -419,14 +404,14 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testUseSerialGCCheck"));
+ () -> BootstrapCheck.check(true, Collections.singletonList(check), "testUseSerialGCCheck"));
assertThat(
e.getMessage(),
containsString("JVM is using the serial collector but should not be for the best performance; " + "" +
"either it's the default for the VM [" + JvmInfo.jvmInfo().getVmName() +"] or -XX:+UseSerialGC was explicitly specified"));

useSerialGC.set("false");
- BootstrapCheck.check(true, false, Collections.singletonList(check), "testUseSerialGCCheck");
+ BootstrapCheck.check(true, Collections.singletonList(check), "testUseSerialGCCheck");
}

public void testMightForkCheck() throws NodeValidationException {

@@ -530,13 +515,13 @@ public class BootstrapCheckTests extends ESTestCase {
} else {
enableMightFork.run();
}
- BootstrapCheck.check(true, randomBoolean(), Collections.singletonList(check), methodName);
+ BootstrapCheck.check(true, Collections.singletonList(check), methodName);

// if seccomp is enabled, but we will not fork, nothing should
// happen
isSeccompInstalled.set(true);
disableMightFork.run();
- BootstrapCheck.check(true, randomBoolean(), Collections.singletonList(check), methodName);
+ BootstrapCheck.check(true, Collections.singletonList(check), methodName);

// if seccomp is enabled, and we might fork, the check should
// be enforced, regardless of bootstrap checks being enabled or

@@ -546,49 +531,10 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException e = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(randomBoolean(), randomBoolean(), Collections.singletonList(check), methodName));
+ () -> BootstrapCheck.check(randomBoolean(), Collections.singletonList(check), methodName));
consumer.accept(e);
}

- public void testIgnoringSystemChecks() throws NodeValidationException {
- final BootstrapCheck.Check check = new BootstrapCheck.Check() {
- @Override
- public boolean check() {
- return true;
- }
-
- @Override
- public String errorMessage() {
- return "error";
- }
-
- @Override
- public boolean isSystemCheck() {
- return true;
- }
- };
-
- final NodeValidationException notIgnored = expectThrows(
- NodeValidationException.class,
- () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testIgnoringSystemChecks"));
- assertThat(notIgnored, hasToString(containsString("error")));
-
- final Logger logger = mock(Logger.class);
-
- // nothing should happen if we ignore system checks
- BootstrapCheck.check(true, true, Collections.singletonList(check), logger);
- verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
- verify(logger).warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
- verify(logger).warn("error");
- verifyNoMoreInteractions(logger);
- reset(logger);
-
- // nothing should happen if we ignore all checks
- BootstrapCheck.check(false, randomBoolean(), Collections.singletonList(check), logger);
- verify(logger).warn("error");
- verifyNoMoreInteractions(logger);
- }
-
public void testAlwaysEnforcedChecks() {
final BootstrapCheck.Check check = new BootstrapCheck.Check() {
@Override

@@ -601,11 +547,6 @@ public class BootstrapCheckTests extends ESTestCase {
return "error";
}

- @Override
- public boolean isSystemCheck() {
- return randomBoolean();
- }
-
@Override
public boolean alwaysEnforce() {
return true;

@@ -614,7 +555,7 @@ public class BootstrapCheckTests extends ESTestCase {

final NodeValidationException alwaysEnforced = expectThrows(
NodeValidationException.class,
- () -> BootstrapCheck.check(randomBoolean(), randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks"));
+ () -> BootstrapCheck.check(randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks"));
assertThat(alwaysEnforced, hasToString(containsString("error")));
}

@@ -330,9 +330,11 @@ public class ClusterSettingsIT extends ESIntegTestCase {
}
}

+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/20318")
public void testLoggerLevelUpdate() {
assertAcked(prepareCreate("test"));

+ final Level level = ESLoggerFactory.getRootLogger().getLevel();

final IllegalArgumentException e =
expectThrows(
IllegalArgumentException.class,

@@ -352,8 +354,8 @@ public class ClusterSettingsIT extends ESIntegTestCase {
final Settings.Builder defaultSettings = Settings.builder().putNull("logger.*");
client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet();
}
- assertEquals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(Settings.EMPTY), ESLoggerFactory.getLogger("test").getLevel());
- assertEquals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(Settings.EMPTY), ESLoggerFactory.getRootLogger().getLevel());
+ assertEquals(level, ESLoggerFactory.getLogger("test").getLevel());
+ assertEquals(level, ESLoggerFactory.getRootLogger().getLevel());
}
}

@@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

import static org.hamcrest.Matchers.containsString;
+ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class SettingTests extends ESTestCase {

@@ -517,4 +518,16 @@ public class SettingTests extends ESTestCase {
assertThat(ex.getMessage(), containsString("properties cannot be null for setting"));
}
}
+
+ public void testTimeValue() {
+ final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test");
+
+ Setting<TimeValue> setting = Setting.timeSetting("foo", random);
+ assertThat(setting.get(Settings.EMPTY), equalTo(random));
+
+ final int factor = randomIntBetween(1, 10);
+ setting = Setting.timeSetting("foo", (s) -> TimeValue.timeValueMillis(random.getMillis() * factor), TimeValue.ZERO);
+ assertThat(setting.get(Settings.builder().put("foo", "12h").build()), equalTo(TimeValue.timeValueHours(12)));
+ assertThat(setting.get(Settings.EMPTY).getMillis(), equalTo(random.getMillis() * factor));
+ }
}
@@ -29,9 +29,11 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.replication.ReplicationOperation;
+ import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
- import org.elasticsearch.action.support.replication.TransportReplicationAction;
+ import org.elasticsearch.action.support.replication.TransportReplicationAction.ReplicaResponse;
import org.elasticsearch.action.support.replication.TransportWriteAction;
+ import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -41,13 +43,13 @@ import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.index.Index;
+ import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
- import org.elasticsearch.transport.TransportResponse;

import java.io.IOException;
import java.util.ArrayList;

@@ -82,7 +84,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName())
.settings(settings)
.primaryTerm(0, 1);
- for (Map.Entry<String, String> typeMapping: indexMapping.entrySet()) {
+ for (Map.Entry<String, String> typeMapping : indexMapping.entrySet()) {
metaData.putMapping(typeMapping.getKey(), typeMapping.getValue());
}
return new ReplicationGroup(metaData.build());

@@ -130,10 +132,9 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
}

public IndexResponse index(IndexRequest indexRequest) throws Exception {
- PlainActionFuture<IndexingResult> listener = new PlainActionFuture<>();
- IndexingOp op = new IndexingOp(indexRequest, listener, this);
- op.execute();
- return listener.get().finalResponse;
+ PlainActionFuture<IndexResponse> listener = new PlainActionFuture<>();
+ new IndexingAction(indexRequest, listener, this).execute();
+ return listener.get();
}

public synchronized void startAll() throws IOException {

@@ -147,7 +148,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
}

public synchronized IndexShard addReplica() throws IOException {
- final IndexShard replica = newShard(shardId, false,"s" + replicaId.incrementAndGet(), indexMetaData, null);
+ final IndexShard replica = newShard(shardId, false, "s" + replicaId.incrementAndGet(), indexMetaData, null);
replicas.add(replica);
return replica;
}

@@ -223,7 +224,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase

@Override
public Iterator<IndexShard> iterator() {
- return Iterators.<IndexShard>concat(replicas.iterator(), Collections.singleton(primary).iterator());
+ return Iterators.concat(replicas.iterator(), Collections.singleton(primary).iterator());
}

public IndexShard getPrimary() {
@@ -231,135 +232,162 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
}
}

class IndexingOp extends ReplicationOperation<IndexRequest, IndexRequest, IndexingResult> {

abstract class ReplicationAction<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
Response extends ReplicationResponse> {
private final Request request;
private ActionListener<Response> listener;
private final ReplicationGroup replicationGroup;
private final String opType;

public IndexingOp(IndexRequest request, ActionListener<IndexingResult> listener, ReplicationGroup replicationGroup) {
super(request, new PrimaryRef(replicationGroup), listener, true, new ReplicasRef(replicationGroup),
() -> null, logger, "indexing");
this.replicationGroup = replicationGroup;
public ReplicationAction(Request request, ActionListener<Response> listener,
ReplicationGroup group, String opType) {
this.request = request;
this.listener = listener;
this.replicationGroup = group;
this.opType = opType;
}

public void execute() throws Exception {
new ReplicationOperation<Request, ReplicaRequest, PrimaryResult>(request, new PrimaryRef(),
new ActionListener<PrimaryResult>() {
@Override
public void onResponse(PrimaryResult result) {
result.respond(listener);
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}, true, new ReplicasRef(), () -> null, logger, opType) {
@Override
protected List<ShardRouting> getShards(ShardId shardId, ClusterState state) {
return replicationGroup.shardRoutings();
}

@Override
protected String checkActiveShardCount() {
return null;
}

@Override
protected Set<String> getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) {
return replicationGroup.shardRoutings().stream().filter(ShardRouting::active).map(r -> r.allocationId().getId())
.collect(Collectors.toSet());
}
}.execute();
}

protected abstract PrimaryResult performOnPrimary(IndexShard primary, Request request) throws Exception;

protected abstract void performOnReplica(ReplicaRequest request, IndexShard replica);

class PrimaryRef implements ReplicationOperation.Primary<Request, ReplicaRequest, PrimaryResult> {

@Override
public ShardRouting routingEntry() {
return replicationGroup.primary.routingEntry();
}

@Override
public void failShard(String message, Exception exception) {
throw new UnsupportedOperationException();
}

@Override
public PrimaryResult perform(Request request) throws Exception {
PrimaryResult response = performOnPrimary(replicationGroup.primary, request);
response.replicaRequest().primaryTerm(replicationGroup.primary.getPrimaryTerm());
return response;
}

@Override
public void updateLocalCheckpointForShard(String allocationId, long checkpoint) {
replicationGroup.getPrimary().updateLocalCheckpointForShard(allocationId, checkpoint);
}

@Override
public long localCheckpoint() {
return replicationGroup.getPrimary().getLocalCheckpoint();
}
}

class ReplicasRef implements ReplicationOperation.Replicas<ReplicaRequest> {

@Override
public void performOn(
ShardRouting replicaRouting,
ReplicaRequest request,
ActionListener<ReplicationOperation.ReplicaResponse> listener) {
try {
IndexShard replica = replicationGroup.replicas.stream()
.filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get();
performOnReplica(request, replica);
listener.onResponse(new ReplicaResponse(replica.routingEntry().allocationId().getId(), replica.getLocalCheckpoint()));
} catch (Exception e) {
listener.onFailure(e);
}
}

@Override
public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
throw new UnsupportedOperationException();
}

@Override
public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
throw new UnsupportedOperationException();
}
}

class PrimaryResult implements ReplicationOperation.PrimaryResult<ReplicaRequest> {
final ReplicaRequest replicaRequest;
final Response finalResponse;

public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) {
this.replicaRequest = replicaRequest;
this.finalResponse = finalResponse;
}

@Override
public ReplicaRequest replicaRequest() {
return replicaRequest;
}

@Override
public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
finalResponse.setShardInfo(shardInfo);
}

public void respond(ActionListener<Response> listener) {
listener.onResponse(finalResponse);
}
}

}

class IndexingAction extends ReplicationAction<IndexRequest, IndexRequest, IndexResponse> {

public IndexingAction(IndexRequest request, ActionListener<IndexResponse> listener, ReplicationGroup replicationGroup) {
super(request, listener, replicationGroup, "indexing");
request.process(null, true, request.index());
}

@Override
protected List<ShardRouting> getShards(ShardId shardId, ClusterState state) {
return replicationGroup.shardRoutings();
}

@Override
protected Set<String> getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) {
return replicationGroup.shardRoutings().stream().filter(ShardRouting::active)
.map(shr -> shr.allocationId().getId()).collect(Collectors.toSet());
}

@Override
protected String checkActiveShardCount() {
return null;
}
}

private static class PrimaryRef implements ReplicationOperation.Primary<IndexRequest, IndexRequest, IndexingResult> {

final IndexShard primary;

private PrimaryRef(ReplicationGroup replicationGroup) {
this.primary = replicationGroup.primary;
}

@Override
public ShardRouting routingEntry() {
return primary.routingEntry();
}

@Override
public void failShard(String message, Exception exception) {
throw new UnsupportedOperationException();
}

@Override
public IndexingResult perform(IndexRequest request) throws Exception {
protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest request) throws Exception {
TransportWriteAction.WriteResult<IndexResponse> result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary,
null);
request.primaryTerm(primary.getPrimaryTerm());
return new IndexingResult(request, result.getResponse());
TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.getLocation(), logger);
return new PrimaryResult(request, result.getResponse());
}

@Override
public void updateLocalCheckpointForShard(String allocationId, long checkpoint) {
primary.updateLocalCheckpointForShard(allocationId, checkpoint);
}

@Override
public long localCheckpoint() {
return primary.getLocalCheckpoint();
}

}

private static class ReplicasRef implements ReplicationOperation.Replicas<IndexRequest> {

private final ReplicationGroup replicationGroup;

private ReplicasRef(ReplicationGroup replicationGroup) {
this.replicationGroup = replicationGroup;
}

@Override
public void performOn(
ShardRouting replicaRouting,
IndexRequest request,
ActionListener<ReplicationOperation.ReplicaResponse> listener) {
try {
IndexShard replica = replicationGroup.replicas.stream()
.filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get();
TransportIndexAction.executeIndexRequestOnReplica(request, replica);
listener.onResponse(
new TransportReplicationAction.ReplicaResponse(
replica.routingEntry().allocationId().getId(),
replica.getLocalCheckpoint()));
} catch (Exception e) {
listener.onFailure(e);
}
}

@Override
public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
throw new UnsupportedOperationException();
}

@Override
public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
throw new UnsupportedOperationException();
}

}


private static class IndexingResult implements ReplicationOperation.PrimaryResult<IndexRequest> {
final IndexRequest replicaRequest;
final IndexResponse finalResponse;

public IndexingResult(IndexRequest replicaRequest, IndexResponse finalResponse) {
this.replicaRequest = replicaRequest;
this.finalResponse = finalResponse;
}

@Override
public IndexRequest replicaRequest() {
return replicaRequest;
}

@Override
public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) {
finalResponse.setShardInfo(shardInfo);
}

public void respond(ActionListener<IndexResponse> listener) {
listener.onResponse(finalResponse);
protected void performOnReplica(IndexRequest request, IndexShard replica) {
Engine.Index index = TransportIndexAction.executeIndexRequestOnReplica(request, replica);
TransportWriteActionTestHelper.performPostWriteActions(replica, request, index.getTranslogLocation(), logger);
}
}

}
@@ -38,7 +38,11 @@ specific index module:

The number of primary shards that an index should have. Defaults to 5.
This setting can only be set at index creation time. It cannot be
- changed on a closed index.
+ changed on a closed index. Note: the number of shards are limited to `1024` per
+ index. This limitation is a safety limit to prevent accidental creation of indices
+ that can destabilize a cluster due to resource allocation. The limit can be modified
+ by specifying `export ES_JAVA_OPTS="-Des.index.max_number_of_shards=128"` system property on every node that is
+ part of the cluster.

`index.shard.check_on_startup`::
+
@@ -934,6 +934,12 @@ to the requester.

[[foreach-processor]]
=== Foreach Processor

+ experimental[This processor may change or be replaced by something else that provides similar functionality. This
+ processor executes in its own context, which makes it different compared to all other processors and for features like
+ verbose simulation the subprocessor isn't visible. The reason we still expose this processor, is that it is the only
+ processor that can operate on an array]
+
Processes elements in an array of unknown length.

All processors can operate on elements inside an array, but if all elements of an array need to
@@ -71,11 +71,10 @@ PUT test
}
}
}
- POST test/test
+ POST test/test?refresh=true
{"title": "noble warriors"}
- POST test/test
+ POST test/test?refresh=true
{"title": "nobel prize"}
- POST _refresh
--------------------------------------------------
// CONSOLE
// TESTSETUP

@@ -126,7 +125,7 @@ can contain misspellings (See parameter descriptions below).
"options" : [ {
"text" : "nobel prize",
"highlighted": "<em>nobel</em> prize",
- "score" : 0.40765354
+ "score" : 0.5962314
}]
}
]
@@ -38,6 +38,8 @@ import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty;
*
* This can be useful in cases to do string operations on json array of strings,
* or remove a field from objects inside a json array.
+ *
+ * Note that this processor is experimental.
*/
public final class ForEachProcessor extends AbstractProcessor {

@@ -56,7 +56,7 @@ public class ScriptProcessorFactoryTests extends ESTestCase {

ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> factory.create(null, randomAsciiOfLength(10), configMap));
- assertThat(exception.getMessage(), is("[null] Only one of [file], [id], or [inline] may be configured"));
+ assertThat(exception.getMessage(), is("Only one of [file], [id], or [inline] may be configured"));
}

public void testFactoryValidationAtLeastOneScriptingType() throws Exception {

@@ -66,6 +66,6 @@ public class ScriptProcessorFactoryTests extends ESTestCase {
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> factory.create(null, randomAsciiOfLength(10), configMap));

- assertThat(exception.getMessage(), is("[null] Need [file], [id], or [inline] parameter to refer to scripts"));
+ assertThat(exception.getMessage(), is("Need [file], [id], or [inline] parameter to refer to scripts"));
}
}
@@ -0,0 +1,47 @@
+ /*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+ package org.elasticsearch.cluster.metadata;
+
+ import org.elasticsearch.common.SuppressForbidden;
+ import org.elasticsearch.common.settings.Settings;
+ import org.elasticsearch.test.ESTestCase;
+
+ public class EvilSystemPropertyTests extends ESTestCase {
+
+ @SuppressForbidden(reason = "manipulates system properties for testing")
+ public void testMaxNumShards() {
+ IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
+ IndexMetaData.buildNumberOfShardsSetting()
+ .get(Settings.builder().put("index.number_of_shards", 1025).build()));
+ assertEquals("Failed to parse value [1025] for setting [index.number_of_shards] must be <= 1024", exception.getMessage());
+
+ Integer numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(Settings.builder().put("index.number_of_shards", 100).build());
+ assertEquals(100, numShards.intValue());
+ int limit = randomIntBetween(1, 10);
+ System.setProperty("es.index.max_number_of_shards", Integer.toString(limit));
+ try {
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ IndexMetaData.buildNumberOfShardsSetting()
+ .get(Settings.builder().put("index.number_of_shards", 11).build()));
+ assertEquals("Failed to parse value [11] for setting [index.number_of_shards] must be <= " + limit, e.getMessage());
+ } finally {
+ System.clearProperty("es.index.max_number_of_shards");
+ }
+ }
+ }
@@ -21,6 +21,7 @@ package org.elasticsearch.test.test;

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
+ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;

@@ -50,27 +51,28 @@ public class LoggingListenerTests extends ESTestCase {
Logger xyzLogger = Loggers.getLogger("xyz");
Logger abcLogger = Loggers.getLogger("abc");

- assertEquals(Level.INFO, abcLogger.getLevel());
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
+
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunStarted(suiteDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));

Method method = TestClass.class.getMethod("annotatedTestMethod");
TestLogging annotation = method.getAnnotation(TestLogging.class);
Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
loggingListener.testStarted(testDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(abcLogger.getLevel(), equalTo(level));

loggingListener.testFinished(testDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));

loggingListener.testRunFinished(new Result());
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
}

public void testCustomLevelPerClass() throws Exception {

@@ -81,24 +83,26 @@ public class LoggingListenerTests extends ESTestCase {
Logger abcLogger = Loggers.getLogger("abc");
Logger xyzLogger = Loggers.getLogger("xyz");

- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
+
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunStarted(suiteDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test");
loggingListener.testStarted(testDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

loggingListener.testFinished(testDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

loggingListener.testRunFinished(new Result());
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
}

public void testCustomLevelPerClassAndPerMethod() throws Exception {

@@ -109,10 +113,12 @@ public class LoggingListenerTests extends ESTestCase {
Logger abcLogger = Loggers.getLogger("abc");
Logger xyzLogger = Loggers.getLogger("xyz");

- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ final Level level = ESLoggerFactory.getRootLogger().getLevel();
+
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
loggingListener.testRunStarted(suiteDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

Method method = TestClass.class.getMethod("annotatedTestMethod");

@@ -123,7 +129,7 @@ public class LoggingListenerTests extends ESTestCase {
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

loggingListener.testFinished(testDescription);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

Method method2 = TestClass.class.getMethod("annotatedTestMethod2");

@@ -134,12 +140,12 @@ public class LoggingListenerTests extends ESTestCase {
assertThat(abcLogger.getLevel(), equalTo(Level.TRACE));

loggingListener.testFinished(testDescription2);
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

loggingListener.testRunFinished(new Result());
- assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
- assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
+ assertThat(xyzLogger.getLevel(), equalTo(level));
+ assertThat(abcLogger.getLevel(), equalTo(level));
}

/**