Add an underscore to flood stage setting

This is a minor bit of bikeshedding that renames the suffix of the
disk flood stage setting from "floodstage" to "flood_stage".

Relates #25659
Commit e165c405ac (parent 3a827827c1)
Author: Jason Tedor, 2017-07-11 22:02:00 -04:00, committed by GitHub
4 changed files with 7 additions and 7 deletions
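Before the diffs, a minimal illustrative sketch (not part of this commit) of the renamed key as it would appear when building node settings with the Elasticsearch Settings API; the values shown are simply the documented defaults:

    import org.elasticsearch.common.settings.Settings;

    public class FloodStageRenameSketch {
        public static void main(String[] args) {
            // After this change the flood stage watermark key uses an underscore;
            // the old "floodstage" spelling is no longer a registered setting.
            Settings nodeSettings = Settings.builder()
                    .put("cluster.routing.allocation.disk.watermark.low", "85%")
                    .put("cluster.routing.allocation.disk.watermark.high", "90%")
                    .put("cluster.routing.allocation.disk.watermark.flood_stage", "95%")
                    .build();
            System.out.println(nodeSettings.get("cluster.routing.allocation.disk.watermark.flood_stage"));
        }
    }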

@@ -314,7 +314,7 @@ class ClusterFormationTasks {
         esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
         esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
         if (Version.fromString(node.nodeVersion).major >= 6) {
-            esConfig['cluster.routing.allocation.disk.watermark.floodstage'] = '1b'
+            esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b'
         }
         esConfig.putAll(node.config.settings)

@@ -66,7 +66,7 @@ public class DiskThresholdMonitor extends AbstractComponent {
     private void warnAboutDiskIfNeeded(DiskUsage usage) {
         // Check absolute disk values
         if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes()) {
-            logger.warn("floodstage disk watermark [{}] exceeded on {}, all indices on this node will marked read-only",
+            logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will marked read-only",
                 diskThresholdSettings.getFreeBytesThresholdFloodStage(), usage);
         } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
             logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node",
@@ -78,7 +78,7 @@ public class DiskThresholdMonitor extends AbstractComponent {
         // Check percentage disk values
         if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) {
-            logger.warn("floodstage disk watermark [{}] exceeded on {}, all indices on this node will marked read-only",
+            logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will marked read-only",
                 Strings.format1Decimals(100.0 - diskThresholdSettings.getFreeDiskThresholdFloodStage(), "%"), usage);
         } else if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) {
             logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node",

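For clarity, a small self-contained sketch (plain Java with hypothetical byte thresholds, not the actual DiskThresholdMonitor code) of the warning order the monitor follows: the most severe watermark, flood stage, is checked before the high watermark.

    public class WatermarkWarningOrderSketch {
        // Report only the most severe exceeded watermark: flood stage first, then high.
        static String warnIfNeeded(long freeBytes, long floodStageFreeBytes, long highFreeBytes) {
            if (freeBytes < floodStageFreeBytes) {
                return "flood stage disk watermark exceeded, indices will be marked read-only";
            } else if (freeBytes < highFreeBytes) {
                return "high disk watermark exceeded, shards will relocate away from this node";
            }
            return "disk usage below all watermarks";
        }

        public static void main(String[] args) {
            // Hypothetical thresholds: flood stage trips below 5 GB free, high below 10 GB free.
            System.out.println(warnIfNeeded(4L << 30, 5L << 30, 10L << 30)); // flood stage warning
            System.out.println(warnIfNeeded(8L << 30, 5L << 30, 10L << 30)); // high watermark warning
        }
    }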

@@ -50,8 +50,8 @@ public class DiskThresholdSettings {
             new HighDiskWatermarkValidator(),
             Setting.Property.Dynamic, Setting.Property.NodeScope);
     public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING =
-        new Setting<>("cluster.routing.allocation.disk.watermark.floodstage", "95%",
-            (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.floodstage"),
+        new Setting<>("cluster.routing.allocation.disk.watermark.flood_stage", "95%",
+            (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.flood_stage"),
             new FloodStageValidator(),
             Setting.Property.Dynamic, Setting.Property.NodeScope);
     public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING =
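A hedged sketch of how the renamed Setting constant would be resolved against a Settings instance (assumes Elasticsearch's DiskThresholdSettings class and Setting#get; not part of this commit):

    import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
    import org.elasticsearch.common.settings.Settings;

    public class FloodStageSettingLookupSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .put("cluster.routing.allocation.disk.watermark.flood_stage", "97%")
                    .build();
            // Resolves the configured value, or the "95%" default when the key is absent.
            String floodStage = DiskThresholdSettings
                    .CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING
                    .get(settings);
            System.out.println(floodStage); // prints 97%
        }
    }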

@@ -28,7 +28,7 @@ file or updated dynamically on a live cluster with the
 relocate shards once less than the configured amount of space is available on
 the node.
-`cluster.routing.allocation.disk.watermark.floodstage`::
+`cluster.routing.allocation.disk.watermark.flood_stage`::
 +
 --
 Controls the flood stage watermark. It defaults to 95%, meaning ES enforces
@@ -91,7 +91,7 @@ PUT _cluster/settings
   "transient": {
     "cluster.routing.allocation.disk.watermark.low": "100gb",
     "cluster.routing.allocation.disk.watermark.high": "50gb",
-    "cluster.routing.allocation.disk.watermark.floodstage": "10gb",
+    "cluster.routing.allocation.disk.watermark.flood_stage": "10gb",
     "cluster.info.update.interval": "1m"
  }
}
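The same transient update could also be issued through the Java client API; a hedged sketch that assumes an already-connected Client instance (illustrative only, not from the docs):

    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.Settings;

    public class FloodStageTransientUpdateSketch {
        // Applies the renamed flood stage watermark dynamically, mirroring the REST example above.
        static void raiseFloodStageWatermark(Client client) {
            Settings transientSettings = Settings.builder()
                    .put("cluster.routing.allocation.disk.watermark.flood_stage", "10gb")
                    .build();
            client.admin().cluster()
                    .prepareUpdateSettings()
                    .setTransientSettings(transientSettings)
                    .get(); // applied live, no node restart required
        }
    }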