Autoscaling decision return absolute capacity (#61575) (#62670)

The autoscaling decision API now returns an absolute capacity,
and leaves the actual decision of whether a scale up or down
is needed to the orchestration system.

The decision API now returns both tier- and node-level required
and current capacity, as well as a decider-level breakdown of the
same, though current memory in particular is still not populated.
This commit is contained in:
Henning Andersen 2020-09-19 09:05:23 +02:00 committed by GitHub
parent 1580fc70bd
commit 2eeb1bddde
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 1220 additions and 438 deletions

View File

@ -17,7 +17,7 @@ PUT /_autoscaling/policy/my_autoscaling_policy
{
"policy": {
"deciders": {
"always": {
"fixed": {
}
}
}

View File

@ -17,7 +17,7 @@ PUT /_autoscaling/policy/my_autoscaling_policy
{
"policy": {
"deciders": {
"always": {
"fixed": {
}
}
}

View File

@ -17,7 +17,7 @@ PUT /_autoscaling/policy/<name>
{
"policy": {
"deciders": {
"always": {
"fixed": {
}
}
}
@ -59,7 +59,7 @@ PUT /_autoscaling/policy/my_autoscaling_policy
{
"policy": {
"deciders": {
"always": {
"fixed": {
}
}
}

View File

@ -6,7 +6,7 @@
body:
policy:
deciders:
always: {}
fixed: {}
- match: { "acknowledged": true }

View File

@ -6,24 +6,34 @@
- match: { "decisions": [] }
---
"Test get always autoscaling decision":
"Test get fixed autoscaling decision":
- do:
autoscaling.put_autoscaling_policy:
name: my_autoscaling_policy
body:
policy:
deciders:
always: {}
fixed:
storage: 1337b
memory: 7331b
nodes: 10
- match: { "acknowledged": true }
- do:
autoscaling.get_autoscaling_decision: {}
- match: { decisions.0.my_autoscaling_policy.decision: scale_up }
- match: { decisions.0.my_autoscaling_policy.decisions.0.name: always }
- match: { decisions.0.my_autoscaling_policy.decisions.0.type: scale_up }
- match: { decisions.0.my_autoscaling_policy.decisions.0.reason: always }
- match: { decisions.0.tier: my_autoscaling_policy }
- match: { decisions.0.required_capacity.tier.storage: 13370b }
- match: { decisions.0.required_capacity.tier.memory: 73310b }
- match: { decisions.0.required_capacity.node.storage: 1337b }
- match: { decisions.0.required_capacity.node.memory: 7331b }
- match: { decisions.0.decisions.0.name: fixed }
- match: { decisions.0.decisions.0.required_capacity.tier.storage: 13370b }
- match: { decisions.0.decisions.0.required_capacity.tier.memory: 73310b }
- match: { decisions.0.decisions.0.required_capacity.node.storage: 1337b }
- match: { decisions.0.decisions.0.required_capacity.node.memory: 7331b }
- match: { decisions.0.decisions.0.reason_summary: "fixed storage [1.3kb] memory [7.1kb] nodes [10]" }
# test cleanup
- do:

View File

@ -6,7 +6,7 @@
body:
policy:
deciders:
always: {}
fixed: {}
- match: { "acknowledged": true }
@ -14,7 +14,7 @@
autoscaling.get_autoscaling_policy:
name: my_autoscaling_policy
- match: { policy.deciders.always: {} }
- match: { policy.deciders.fixed: {} }
# test cleanup
- do:

View File

@ -6,7 +6,7 @@
body:
policy:
deciders:
always: {}
fixed: {}
- match: { "acknowledged": true }

View File

@ -45,8 +45,9 @@ import org.elasticsearch.xpack.autoscaling.action.TransportDeleteAutoscalingPoli
import org.elasticsearch.xpack.autoscaling.action.TransportGetAutoscalingDecisionAction;
import org.elasticsearch.xpack.autoscaling.action.TransportGetAutoscalingPolicyAction;
import org.elasticsearch.xpack.autoscaling.action.TransportPutAutoscalingPolicyAction;
import org.elasticsearch.xpack.autoscaling.decision.AlwaysAutoscalingDeciderConfiguration;
import org.elasticsearch.xpack.autoscaling.decision.AlwaysAutoscalingDeciderService;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecision;
import org.elasticsearch.xpack.autoscaling.decision.FixedAutoscalingDeciderConfiguration;
import org.elasticsearch.xpack.autoscaling.decision.FixedAutoscalingDeciderService;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDeciderConfiguration;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDeciderService;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecisionService;
@ -181,8 +182,13 @@ public class Autoscaling extends Plugin implements ActionPlugin, ExtensiblePlugi
new NamedWriteableRegistry.Entry(NamedDiff.class, AutoscalingMetadata.NAME, AutoscalingMetadata.AutoscalingMetadataDiff::new),
new NamedWriteableRegistry.Entry(
AutoscalingDeciderConfiguration.class,
AlwaysAutoscalingDeciderConfiguration.NAME,
AlwaysAutoscalingDeciderConfiguration::new
FixedAutoscalingDeciderConfiguration.NAME,
FixedAutoscalingDeciderConfiguration::new
),
new NamedWriteableRegistry.Entry(
AutoscalingDecision.Reason.class,
FixedAutoscalingDeciderConfiguration.NAME,
FixedAutoscalingDeciderService.FixedReason::new
)
);
}
@ -193,8 +199,8 @@ public class Autoscaling extends Plugin implements ActionPlugin, ExtensiblePlugi
new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(AutoscalingMetadata.NAME), AutoscalingMetadata::parse),
new NamedXContentRegistry.Entry(
AutoscalingDeciderConfiguration.class,
new ParseField(AlwaysAutoscalingDeciderConfiguration.NAME),
AlwaysAutoscalingDeciderConfiguration::parse
new ParseField(FixedAutoscalingDeciderConfiguration.NAME),
FixedAutoscalingDeciderConfiguration::parse
)
);
}
@ -210,7 +216,7 @@ public class Autoscaling extends Plugin implements ActionPlugin, ExtensiblePlugi
@Override
public Collection<AutoscalingDeciderService<? extends AutoscalingDeciderConfiguration>> deciders() {
return org.elasticsearch.common.collect.List.of(new AlwaysAutoscalingDeciderService());
return org.elasticsearch.common.collect.List.of(new FixedAutoscalingDeciderService());
}
public Set<AutoscalingDeciderService<? extends AutoscalingDeciderConfiguration>> createDeciderServices() {

View File

@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecisions;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.SortedMap;
import java.util.TreeMap;
@ -89,12 +88,8 @@ public class GetAutoscalingDecisionAction extends ActionType<GetAutoscalingDecis
{
builder.startArray("decisions");
{
for (final Map.Entry<String, AutoscalingDecisions> decision : decisions.entrySet()) {
builder.startObject();
{
builder.field(decision.getKey(), decision.getValue());
}
builder.endObject();
for (final AutoscalingDecisions decision : decisions.values()) {
decision.toXContent(builder, params);
}
}
builder.endArray();

View File

@ -9,6 +9,7 @@ package org.elasticsearch.xpack.autoscaling.action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -26,6 +27,7 @@ public class TransportGetAutoscalingDecisionAction extends TransportMasterNodeAc
GetAutoscalingDecisionAction.Response> {
private final AutoscalingDecisionService decisionService;
private final ClusterInfoService clusterInfoService;
@Inject
public TransportGetAutoscalingDecisionAction(
@ -34,7 +36,8 @@ public class TransportGetAutoscalingDecisionAction extends TransportMasterNodeAc
final ThreadPool threadPool,
final ActionFilters actionFilters,
final IndexNameExpressionResolver indexNameExpressionResolver,
final AutoscalingDecisionService.Holder decisionServiceHolder
final AutoscalingDecisionService.Holder decisionServiceHolder,
final ClusterInfoService clusterInfoService
) {
super(
GetAutoscalingDecisionAction.NAME,
@ -46,6 +49,7 @@ public class TransportGetAutoscalingDecisionAction extends TransportMasterNodeAc
indexNameExpressionResolver
);
this.decisionService = decisionServiceHolder.get();
this.clusterInfoService = clusterInfoService;
assert this.decisionService != null;
}
@ -65,7 +69,7 @@ public class TransportGetAutoscalingDecisionAction extends TransportMasterNodeAc
final ClusterState state,
final ActionListener<GetAutoscalingDecisionAction.Response> listener
) {
listener.onResponse(new GetAutoscalingDecisionAction.Response(decisionService.decide(state)));
listener.onResponse(new GetAutoscalingDecisionAction.Response(decisionService.decide(state, clusterInfoService.getClusterInfo())));
}
@Override

View File

@ -1,72 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
/**
 * Configuration for the "always" decider. The decider takes no settings, so this
 * configuration carries no state: it serializes as an empty object and all
 * instances compare equal.
 */
public class AlwaysAutoscalingDeciderConfiguration implements AutoscalingDeciderConfiguration {

    public static final String NAME = "always";

    // parses the empty JSON object form: "always": {}
    private static final ObjectParser<AlwaysAutoscalingDeciderConfiguration, Void> OBJECT_PARSER = new ObjectParser<>(
        NAME,
        AlwaysAutoscalingDeciderConfiguration::new
    );

    public static AlwaysAutoscalingDeciderConfiguration parse(final XContentParser parser) {
        return OBJECT_PARSER.apply(parser, null);
    }

    public AlwaysAutoscalingDeciderConfiguration() {}

    @SuppressWarnings("unused")
    public AlwaysAutoscalingDeciderConfiguration(final StreamInput in) {
        // no state to read
    }

    @Override
    public String name() {
        return NAME;
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public void writeTo(final StreamOutput out) {
        // no state to write
    }

    @Override
    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
        // always renders as an empty object
        return builder.startObject().endObject();
    }

    @Override
    public boolean equals(final Object o) {
        // stateless: two instances are equal exactly when their classes match
        return this == o || (o != null && getClass() == o.getClass());
    }

    @Override
    public int hashCode() {
        return 0;
    }
}

View File

@ -1,25 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.inject.Inject;
/**
 * Decider service that unconditionally answers with a scale-up decision,
 * regardless of its configuration or the supplied context.
 */
public class AlwaysAutoscalingDeciderService implements AutoscalingDeciderService<AlwaysAutoscalingDeciderConfiguration> {

    @Inject
    public AlwaysAutoscalingDeciderService() {}

    @Override
    public String name() {
        return AlwaysAutoscalingDeciderConfiguration.NAME;
    }

    @Override
    public AutoscalingDecision scale(AlwaysAutoscalingDeciderConfiguration decider, AutoscalingDeciderContext context) {
        // both arguments are deliberately ignored; the verdict never varies
        return new AutoscalingDecision(name(), AutoscalingDecisionType.SCALE_UP, "always");
    }
}

View File

@ -0,0 +1,262 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
* Represents current/required capacity of a single tier.
*/
/**
 * Represents current/required capacity of a single tier: always a tier-level
 * amount and optionally a per-node amount.
 */
public class AutoscalingCapacity implements ToXContent, Writeable {

    private final AutoscalingResources tier;
    private final AutoscalingResources node;

    /**
     * Storage and memory amounts. Either component may be null (unspecified),
     * but never both.
     */
    public static class AutoscalingResources implements ToXContent, Writeable {
        private final ByteSizeValue storage;
        private final ByteSizeValue memory;

        public static final AutoscalingResources ZERO = new AutoscalingResources(new ByteSizeValue(0), new ByteSizeValue(0));

        public AutoscalingResources(ByteSizeValue storage, ByteSizeValue memory) {
            assert storage != null || memory != null;
            this.storage = storage;
            this.memory = memory;
        }

        public AutoscalingResources(StreamInput in) throws IOException {
            this.storage = in.readOptionalWriteable(ByteSizeValue::new);
            this.memory = in.readOptionalWriteable(ByteSizeValue::new);
        }

        /** Storage amount, or null if unspecified. */
        public ByteSizeValue storage() {
            return storage;
        }

        /** Memory amount, or null if unspecified. */
        public ByteSizeValue memory() {
            return memory;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            // omit unspecified components from the rendered object
            if (storage != null) {
                builder.field("storage", storage.getStringRep());
            }
            if (memory != null) {
                builder.field("memory", memory.getStringRep());
            }
            builder.endObject();
            return builder;
        }

        @Override
        public boolean isFragment() {
            return false;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalWriteable(storage);
            out.writeOptionalWriteable(memory);
        }

        /**
         * Component-wise maximum. A null argument yields the other argument;
         * a null component is treated as absent, not as zero.
         */
        public static AutoscalingResources max(AutoscalingResources sm1, AutoscalingResources sm2) {
            if (sm1 == null) {
                return sm2;
            }
            if (sm2 == null) {
                return sm1;
            }
            return new AutoscalingResources(max(sm1.storage, sm2.storage), max(sm1.memory, sm2.memory));
        }

        /**
         * Component-wise sum. A null argument yields the other argument;
         * a null component is treated as absent, not as zero.
         */
        public static AutoscalingResources sum(AutoscalingResources sm1, AutoscalingResources sm2) {
            if (sm1 == null) {
                return sm2;
            }
            if (sm2 == null) {
                return sm1;
            }
            return new AutoscalingResources(add(sm1.storage, sm2.storage), add(sm1.memory, sm2.memory));
        }

        private static ByteSizeValue max(ByteSizeValue v1, ByteSizeValue v2) {
            if (v1 == null) {
                return v2;
            }
            if (v2 == null) {
                return v1;
            }
            return v1.compareTo(v2) < 0 ? v2 : v1;
        }

        private static ByteSizeValue add(ByteSizeValue v1, ByteSizeValue v2) {
            if (v1 == null) {
                return v2;
            }
            if (v2 == null) {
                return v1;
            }
            return new ByteSizeValue(v1.getBytes() + v2.getBytes());
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            AutoscalingResources that = (AutoscalingResources) o;
            return Objects.equals(storage, that.storage) && Objects.equals(memory, that.memory);
        }

        @Override
        public int hashCode() {
            return Objects.hash(storage, memory);
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }

    public static final AutoscalingCapacity ZERO = new AutoscalingCapacity(AutoscalingResources.ZERO, AutoscalingResources.ZERO);

    /**
     * @param tier tier-level resources; must not be null
     * @param node per-node resources, may be null; a node-level component may only be
     *             specified when the corresponding tier-level component is specified too
     */
    public AutoscalingCapacity(AutoscalingResources tier, AutoscalingResources node) {
        assert tier != null : "Cannot provide capacity without specifying tier level capacity";
        assert node == null || node.memory == null
            // implies
            || tier.memory != null : "Cannot provide node memory without tier memory";
        assert node == null || node.storage == null
            // implies
            // fixed: message previously said "tier memory" although this checks tier storage
            || tier.storage != null : "Cannot provide node storage without tier storage";
        this.tier = tier;
        this.node = node;
    }

    public AutoscalingCapacity(StreamInput in) throws IOException {
        this.tier = new AutoscalingResources(in);
        this.node = in.readOptionalWriteable(AutoscalingResources::new);
    }

    /** Tier-level resources; never null. */
    public AutoscalingResources tier() {
        return tier;
    }

    /** Per-node resources, or null if unspecified. */
    public AutoscalingResources node() {
        return node;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        tier.writeTo(out);
        out.writeOptionalWriteable(node);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        if (node != null) {
            builder.field("node", node);
        }
        builder.field("tier", tier);
        builder.endObject();
        return builder;
    }

    @Override
    public boolean isFragment() {
        return false;
    }

    /**
     * Component-wise maximum of two capacities, at both the tier and node level.
     */
    public static AutoscalingCapacity upperBound(AutoscalingCapacity c1, AutoscalingCapacity c2) {
        return new AutoscalingCapacity(AutoscalingResources.max(c1.tier, c2.tier), AutoscalingResources.max(c1.node, c2.node));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AutoscalingCapacity capacity = (AutoscalingCapacity) o;
        return tier.equals(capacity.tier) && Objects.equals(node, capacity.node);
    }

    @Override
    public int hashCode() {
        return Objects.hash(tier, node);
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Fluent builder for {@link AutoscalingCapacity}. */
    public static class Builder {
        private AutoscalingResources tier;
        private AutoscalingResources node;

        public Builder() {}

        /** Copy both levels from an existing capacity. */
        public Builder capacity(AutoscalingCapacity capacity) {
            this.tier = capacity.tier;
            this.node = capacity.node;
            return this;
        }

        public Builder tier(Long storage, Long memory) {
            return tier(byteSizeValue(storage), byteSizeValue(memory));
        }

        public Builder tier(ByteSizeValue storage, ByteSizeValue memory) {
            return tier(new AutoscalingResources(storage, memory));
        }

        public Builder tier(AutoscalingResources tier) {
            this.tier = tier;
            return this;
        }

        public Builder node(Long storage, Long memory) {
            return node(byteSizeValue(storage), byteSizeValue(memory));
        }

        public Builder node(ByteSizeValue storage, ByteSizeValue memory) {
            return node(new AutoscalingResources(storage, memory));
        }

        public Builder node(AutoscalingResources node) {
            this.node = node;
            return this;
        }

        public AutoscalingCapacity build() {
            return new AutoscalingCapacity(tier, node);
        }

        // null-tolerant conversion helper; renamed param from "memory" since it is
        // used for storage values as well
        private ByteSizeValue byteSizeValue(Long bytes) {
            return bytes == null ? null : new ByteSizeValue(bytes);
        }
    }
}

View File

@ -10,4 +10,10 @@ import org.elasticsearch.cluster.ClusterState;
/**
 * Context supplied to each decider when computing an autoscaling decision.
 */
public interface AutoscalingDeciderContext {

    /**
     * Return the cluster state the decision is based on.
     */
    ClusterState state();

    /**
     * Return current capacity of tier. Can be null if the capacity of some nodes is unavailable. If a decider relies on this value and
     * gets a null current capacity, it should return a decision with a null requiredCapacity (undecided).
     */
    AutoscalingCapacity currentCapacity();
}

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -16,70 +17,70 @@ import java.io.IOException;
import java.util.Objects;
/**
* Represents an autoscaling decision.
* Represents an autoscaling decision from a single decider
*/
public class AutoscalingDecision implements ToXContent, Writeable {
private final String name;
private final AutoscalingCapacity requiredCapacity;
private final Reason reason;
public String name() {
return name;
public interface Reason extends ToXContent, NamedWriteable {
String summary();
}
private final AutoscalingDecisionType type;
public AutoscalingDecisionType type() {
return type;
/**
* Create a new decision with required capacity.
* @param requiredCapacity required capacity or null if no decision can be made due to insufficient information.
* @param reason details/data behind the decision
*/
public AutoscalingDecision(AutoscalingCapacity requiredCapacity, Reason reason) {
this.requiredCapacity = requiredCapacity;
this.reason = reason;
}
private final String reason;
public AutoscalingDecision(StreamInput in) throws IOException {
this.requiredCapacity = in.readOptionalWriteable(AutoscalingCapacity::new);
this.reason = in.readOptionalNamedWriteable(Reason.class);
}
public String reason() {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(requiredCapacity);
out.writeOptionalNamedWriteable(reason);
}
public AutoscalingCapacity requiredCapacity() {
return requiredCapacity;
}
public Reason reason() {
return reason;
}
public AutoscalingDecision(final String name, final AutoscalingDecisionType type, final String reason) {
this.name = Objects.requireNonNull(name);
this.type = Objects.requireNonNull(type);
this.reason = Objects.requireNonNull(reason);
}
public AutoscalingDecision(final StreamInput in) throws IOException {
this.name = in.readString();
this.type = AutoscalingDecisionType.readFrom(in);
this.reason = in.readString();
}
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeString(name);
type.writeTo(out);
out.writeString(reason);
}
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final ToXContent.Params params) throws IOException {
builder.startObject();
{
builder.field("name", name);
builder.field("type", type);
builder.field("reason", reason);
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (requiredCapacity != null) {
builder.field("required_capacity", requiredCapacity);
}
builder.endObject();
if (reason != null) {
builder.field("reason_summary", reason.summary());
builder.field("reason_details", reason);
}
return builder;
}
@Override
public boolean equals(final Object o) {
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final AutoscalingDecision that = (AutoscalingDecision) o;
return name.equals(that.name) && type == that.type && reason.equals(that.reason);
AutoscalingDecision that = (AutoscalingDecision) o;
return Objects.equals(requiredCapacity, that.requiredCapacity) && Objects.equals(reason, that.reason);
}
@Override
public int hashCode() {
return Objects.hash(name, type, reason);
return Objects.hash(requiredCapacity, reason);
}
}

View File

@ -7,25 +7,32 @@
package org.elasticsearch.xpack.autoscaling.decision;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.xpack.autoscaling.Autoscaling;
import org.elasticsearch.xpack.autoscaling.AutoscalingMetadata;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicy;
import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
public class AutoscalingDecisionService {
private Map<String, AutoscalingDeciderService<? extends AutoscalingDeciderConfiguration>> deciderByName;
public AutoscalingDecisionService(Set<AutoscalingDeciderService<? extends AutoscalingDeciderConfiguration>> deciders) {
assert deciders.size() >= 1; // always have always
assert deciders.size() >= 1; // always have fixed
this.deciderByName = deciders.stream().collect(Collectors.toMap(AutoscalingDeciderService::name, Function.identity()));
}
@ -49,8 +56,7 @@ public class AutoscalingDecisionService {
}
}
public SortedMap<String, AutoscalingDecisions> decide(ClusterState state) {
AutoscalingDeciderContext context = () -> state;
public SortedMap<String, AutoscalingDecisions> decide(ClusterState state, ClusterInfo clusterInfo) {
AutoscalingMetadata autoscalingMetadata = state.metadata().custom(AutoscalingMetadata.NAME);
if (autoscalingMetadata != null) {
@ -58,7 +64,7 @@ public class AutoscalingDecisionService {
autoscalingMetadata.policies()
.entrySet()
.stream()
.map(e -> Tuple.tuple(e.getKey(), getDecision(e.getValue().policy(), context)))
.map(e -> Tuple.tuple(e.getKey(), getDecision(e.getValue().policy(), state, clusterInfo)))
.collect(Collectors.toMap(Tuple::v1, Tuple::v2))
);
} else {
@ -66,13 +72,14 @@ public class AutoscalingDecisionService {
}
}
private AutoscalingDecisions getDecision(AutoscalingPolicy policy, AutoscalingDeciderContext context) {
Collection<AutoscalingDecision> decisions = policy.deciders()
.values()
private AutoscalingDecisions getDecision(AutoscalingPolicy policy, ClusterState state, ClusterInfo clusterInfo) {
DecisionAutoscalingDeciderContext context = new DecisionAutoscalingDeciderContext(policy.name(), state, clusterInfo);
SortedMap<String, AutoscalingDecision> decisions = policy.deciders()
.entrySet()
.stream()
.map(decider -> getDecision(decider, context))
.collect(Collectors.toList());
return new AutoscalingDecisions(decisions);
.map(entry -> Tuple.tuple(entry.getKey(), getDecision(entry.getValue(), context)))
.collect(Collectors.toMap(Tuple::v1, Tuple::v2, (a, b) -> { throw new UnsupportedOperationException(); }, TreeMap::new));
return new AutoscalingDecisions(context.tier, context.currentCapacity, decisions);
}
private <T extends AutoscalingDeciderConfiguration> AutoscalingDecision getDecision(T decider, AutoscalingDeciderContext context) {
@ -81,4 +88,85 @@ public class AutoscalingDecisionService {
AutoscalingDeciderService<T> service = (AutoscalingDeciderService<T>) deciderByName.get(decider.name());
return service.scale(decider, context);
}
    /**
     * Decider context for a single policy/tier: snapshots the cluster state and
     * ClusterInfo up front and eagerly computes the tier's current capacity.
     */
    static class DecisionAutoscalingDeciderContext implements AutoscalingDeciderContext {

        private final String tier;
        private final ClusterState state;
        private final ClusterInfo clusterInfo;
        // capacity aggregated over the nodes matching this tier (computed in the constructor)
        private final AutoscalingCapacity currentCapacity;
        // true only if disk usage is known for every node in the tier
        private final boolean currentCapacityAccurate;

        DecisionAutoscalingDeciderContext(String tier, ClusterState state, ClusterInfo clusterInfo) {
            this.tier = tier;
            Objects.requireNonNull(state);
            Objects.requireNonNull(clusterInfo);
            this.state = state;
            this.clusterInfo = clusterInfo;
            this.currentCapacity = calculateCurrentCapacity();
            this.currentCapacityAccurate = calculateCurrentCapacityAccurate();
        }

        @Override
        public ClusterState state() {
            return state;
        }

        @Override
        public AutoscalingCapacity currentCapacity() {
            // per the AutoscalingDeciderContext contract, report null rather than a
            // potentially misleading capacity when any node's disk usage is unknown
            if (currentCapacityAccurate) {
                return currentCapacity;
            } else {
                return null;
            }
        }

        private boolean calculateCurrentCapacityAccurate() {
            return StreamSupport.stream(state.nodes().spliterator(), false)
                .filter(this::informalTierFilter)
                .allMatch(this::nodeHasAccurateCapacity);
        }

        private boolean nodeHasAccurateCapacity(DiscoveryNode node) {
            // totalStorage returns -1 when ClusterInfo has no disk-usage entry for the node
            return totalStorage(clusterInfo.getNodeLeastAvailableDiskUsages(), node) >= 0
                && totalStorage(clusterInfo.getNodeMostAvailableDiskUsages(), node) >= 0;
        }

        private AutoscalingCapacity calculateCurrentCapacity() {
            // tier-level resources are summed across nodes; node-level resources keep
            // the maximum of any single node
            return StreamSupport.stream(state.nodes().spliterator(), false)
                .filter(this::informalTierFilter)
                .map(this::resourcesFor)
                .map(c -> new AutoscalingCapacity(c, c))
                .reduce(
                    (c1, c2) -> new AutoscalingCapacity(
                        AutoscalingCapacity.AutoscalingResources.sum(c1.tier(), c2.tier()),
                        AutoscalingCapacity.AutoscalingResources.max(c1.node(), c2.node())
                    )
                )
                .orElse(AutoscalingCapacity.ZERO);
        }

        private AutoscalingCapacity.AutoscalingResources resourcesFor(DiscoveryNode node) {
            // take the larger of the two disk-usage views; -1 (unknown) is mapped to zero
            long storage = Math.max(
                totalStorage(clusterInfo.getNodeLeastAvailableDiskUsages(), node),
                totalStorage(clusterInfo.getNodeMostAvailableDiskUsages(), node)
            );
            // todo: also capture memory across cluster.
            return new AutoscalingCapacity.AutoscalingResources(
                storage == -1 ? ByteSizeValue.ZERO : new ByteSizeValue(storage),
                ByteSizeValue.ZERO
            );
        }

        private long totalStorage(ImmutableOpenMap<String, DiskUsage> diskUsages, DiscoveryNode node) {
            DiskUsage diskUsage = diskUsages.get(node.getId());
            // -1 signals "unknown" to callers
            return diskUsage != null ? diskUsage.getTotalBytes() : -1;
        }

        private boolean informalTierFilter(DiscoveryNode discoveryNode) {
            // a node belongs to the tier if one of its role names equals the tier name,
            // or its "data" node attribute equals the tier name
            return discoveryNode.getRoles().stream().map(DiscoveryNodeRole::roleName).anyMatch(tier::equals)
                || tier.equals(discoveryNode.getAttributes().get("data"));
        }
    }
}

View File

@ -1,73 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Locale;
/**
* Represents the type of an autoscaling decision: to indicating if a scale down, no scaling event, or a scale up is needed.
*/
/**
 * Represents the type of an autoscaling decision: whether a scale down, no
 * scaling event, or a scale up is needed. Serialized over the wire as a
 * single byte id and rendered to XContent as the lowercase constant name.
 */
public enum AutoscalingDecisionType implements Writeable, ToXContentFragment {

    /** Indicates that a scale down event is needed. */
    SCALE_DOWN((byte) 0),

    /** Indicates that no scaling event is needed. */
    NO_SCALE((byte) 1),

    /** Indicates that a scale up event is needed. */
    SCALE_UP((byte) 2);

    private final byte id;

    AutoscalingDecisionType(final byte id) {
        this.id = id;
    }

    byte id() {
        return id;
    }

    /**
     * Read a decision type from the stream by its byte id.
     *
     * @throws IllegalArgumentException if the id does not map to a known type
     */
    public static AutoscalingDecisionType readFrom(final StreamInput in) throws IOException {
        final byte id = in.readByte();
        for (final AutoscalingDecisionType type : values()) {
            if (type.id == id) {
                return type;
            }
        }
        throw new IllegalArgumentException("unexpected value [" + id + "] for autoscaling decision type");
    }

    @Override
    public void writeTo(final StreamOutput out) throws IOException {
        out.writeByte(id);
    }

    @Override
    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
        return builder.value(name().toLowerCase(Locale.ROOT));
    }
}

View File

@ -13,21 +13,37 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* Represents a collection of individual autoscaling decisions that can be aggregated into a single autoscaling decision.
* Represents a collection of individual autoscaling decisions that can be aggregated into a single autoscaling decision for a tier
*/
public class AutoscalingDecisions implements ToXContent, Writeable {
private final Collection<AutoscalingDecision> decisions;
private final String tier;
private final AutoscalingCapacity currentCapacity;
private final SortedMap<String, AutoscalingDecision> decisions;
public Collection<AutoscalingDecision> decisions() {
/**
* Return map of decisions, keyed by decider name.
*/
public Map<String, AutoscalingDecision> decisions() {
return decisions;
}
public AutoscalingDecisions(final Collection<AutoscalingDecision> decisions) {
public AutoscalingDecisions(
final String tier,
final AutoscalingCapacity currentCapacity,
final SortedMap<String, AutoscalingDecision> decisions
) {
Objects.requireNonNull(tier);
Objects.requireNonNull(currentCapacity);
this.tier = tier;
this.currentCapacity = currentCapacity;
Objects.requireNonNull(decisions);
if (decisions.isEmpty()) {
throw new IllegalArgumentException("decisions can not be empty");
@ -36,47 +52,70 @@ public class AutoscalingDecisions implements ToXContent, Writeable {
}
public AutoscalingDecisions(final StreamInput in) throws IOException {
this.decisions = in.readList(AutoscalingDecision::new);
this.tier = in.readString();
this.currentCapacity = new AutoscalingCapacity(in);
this.decisions = new TreeMap<>(in.readMap(StreamInput::readString, AutoscalingDecision::new));
}
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeCollection(decisions);
out.writeString(tier);
currentCapacity.writeTo(out);
out.writeMap(decisions, StreamOutput::writeString, (output, decision) -> decision.writeTo(output));
}
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
builder.startObject();
builder.field("decision", type());
builder.array("decisions", decisions.toArray());
builder.field("tier", tier);
AutoscalingCapacity requiredCapacity = requiredCapacity();
if (requiredCapacity != null) {
builder.field("required_capacity", requiredCapacity);
}
builder.field("current_capacity", currentCapacity);
builder.startArray("decisions");
for (Map.Entry<String, AutoscalingDecision> entry : decisions.entrySet()) {
builder.startObject();
builder.field("name", entry.getKey());
entry.getValue().toXContent(builder, params);
builder.endObject();
}
builder.endArray();
builder.endObject();
return builder;
}
public AutoscalingDecisionType type() {
if (decisions.stream().anyMatch(p -> p.type() == AutoscalingDecisionType.SCALE_UP)) {
// if any deciders say to scale up
return AutoscalingDecisionType.SCALE_UP;
} else if (decisions.stream().allMatch(p -> p.type() == AutoscalingDecisionType.SCALE_DOWN)) {
// if all deciders say to scale down
return AutoscalingDecisionType.SCALE_DOWN;
} else {
// otherwise, do not scale
return AutoscalingDecisionType.NO_SCALE;
public AutoscalingCapacity requiredCapacity() {
if (decisions.values().stream().map(AutoscalingDecision::requiredCapacity).anyMatch(Objects::isNull)) {
// any undetermined decider cancels out any decision making.
return null;
}
Optional<AutoscalingCapacity> result = decisions.values()
.stream()
.map(AutoscalingDecision::requiredCapacity)
.reduce(AutoscalingCapacity::upperBound);
assert result.isPresent();
return result.get();
}
public AutoscalingCapacity currentCapacity() {
return currentCapacity;
}
public String tier() {
return tier;
}
@Override
public boolean equals(final Object o) {
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final AutoscalingDecisions that = (AutoscalingDecisions) o;
return decisions.equals(that.decisions);
AutoscalingDecisions that = (AutoscalingDecisions) o;
return tier.equals(that.tier) && currentCapacity.equals(that.currentCapacity) && decisions.equals(that.decisions);
}
@Override
public int hashCode() {
return Objects.hash(decisions);
return Objects.hash(tier, currentCapacity, decisions);
}
}

View File

@ -0,0 +1,133 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
/**
 * Configuration for the {@code fixed} autoscaling decider. The decider asks for a fixed,
 * pre-configured amount of storage and/or memory per node and a fixed node count,
 * independent of the actual state of the cluster. All three settings are optional.
 */
public class FixedAutoscalingDeciderConfiguration implements AutoscalingDeciderConfiguration {

    public static final String NAME = "fixed";

    private static final ConstructingObjectParser<FixedAutoscalingDeciderConfiguration, Void> PARSER = new ConstructingObjectParser<>(
        NAME,
        c -> new FixedAutoscalingDeciderConfiguration((ByteSizeValue) c[0], (ByteSizeValue) c[1], (Integer) c[2])
    );

    private static final ParseField STORAGE = new ParseField("storage");
    private static final ParseField MEMORY = new ParseField("memory");
    private static final ParseField NODES = new ParseField("nodes");

    static {
        // storage and memory are parsed from byte size strings (e.g. "1337b"); all three fields are optional
        PARSER.declareField(
            ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.textOrNull(), STORAGE.getPreferredName()),
            STORAGE,
            ObjectParser.ValueType.VALUE
        );
        PARSER.declareField(
            ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.textOrNull(), MEMORY.getPreferredName()),
            MEMORY,
            ObjectParser.ValueType.VALUE
        );
        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NODES);
    }

    public static FixedAutoscalingDeciderConfiguration parse(final XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    // per-node storage requirement, may be null
    private final ByteSizeValue storage;
    // per-node memory requirement, may be null
    private final ByteSizeValue memory;
    // requested node count, may be null
    private final Integer nodes;

    /**
     * Creates a configuration with no storage/memory requirement and a single node.
     */
    public FixedAutoscalingDeciderConfiguration() {
        this(null, null, 1);
    }

    /**
     * @param storage per-node storage, may be null
     * @param memory  per-node memory, may be null
     * @param nodes   node count, may be null
     */
    public FixedAutoscalingDeciderConfiguration(ByteSizeValue storage, ByteSizeValue memory, Integer nodes) {
        this.storage = storage;
        this.memory = memory;
        this.nodes = nodes;
    }

    @SuppressWarnings("unused")
    public FixedAutoscalingDeciderConfiguration(final StreamInput in) throws IOException {
        this.storage = in.readOptionalWriteable(ByteSizeValue::new);
        this.memory = in.readOptionalWriteable(ByteSizeValue::new);
        this.nodes = in.readOptionalInt();
    }

    public ByteSizeValue storage() {
        return storage;
    }

    public ByteSizeValue memory() {
        return memory;
    }

    public Integer nodes() {
        return nodes;
    }

    @Override
    public String name() {
        return NAME;
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public void writeTo(final StreamOutput out) throws IOException {
        // keep field order in sync with the StreamInput constructor
        out.writeOptionalWriteable(storage);
        out.writeOptionalWriteable(memory);
        out.writeOptionalInt(nodes);
    }

    @Override
    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
        builder.startObject();
        {
            // use the ParseField names the parser declares, so that output round-trips through parse()
            if (storage != null) {
                builder.field(STORAGE.getPreferredName(), storage.getStringRep());
            }
            if (memory != null) {
                builder.field(MEMORY.getPreferredName(), memory.getStringRep());
            }
            if (nodes != null) {
                builder.field(NODES.getPreferredName(), nodes);
            }
        }
        builder.endObject();
        return builder;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        FixedAutoscalingDeciderConfiguration that = (FixedAutoscalingDeciderConfiguration) o;
        return Objects.equals(storage, that.storage) && Objects.equals(memory, that.memory) && Objects.equals(nodes, that.nodes);
    }

    @Override
    public int hashCode() {
        return Objects.hash(storage, memory, nodes);
    }
}

View File

@ -0,0 +1,110 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
 * Decider service backing the {@code fixed} decider: it reports the capacity spelled out in the
 * configuration (per-node resources and tier totals scaled by node count), ignoring cluster load.
 */
public class FixedAutoscalingDeciderService implements AutoscalingDeciderService<FixedAutoscalingDeciderConfiguration> {

    @Inject
    public FixedAutoscalingDeciderService() {}

    @Override
    public String name() {
        return FixedAutoscalingDeciderConfiguration.NAME;
    }

    @Override
    public AutoscalingDecision scale(FixedAutoscalingDeciderConfiguration configuration, AutoscalingDeciderContext context) {
        // default to a single node when no node count is configured
        final int nodeCount = configuration.nodes() == null ? 1 : configuration.nodes();
        final AutoscalingCapacity requiredCapacity;
        if (configuration.storage() == null && configuration.memory() == null) {
            // nothing configured: this decider expresses no capacity requirement
            requiredCapacity = null;
        } else {
            requiredCapacity = AutoscalingCapacity.builder()
                .tier(totalCapacity(configuration.storage(), nodeCount), totalCapacity(configuration.memory(), nodeCount))
                .node(configuration.storage(), configuration.memory())
                .build();
        }
        return new AutoscalingDecision(requiredCapacity, new FixedReason(configuration.storage(), configuration.memory(), nodeCount));
    }

    /** Tier-level total for one resource: per-node value times node count, or null when unset. */
    private static ByteSizeValue totalCapacity(ByteSizeValue perNode, int nodeCount) {
        return perNode == null ? null : new ByteSizeValue(perNode.getBytes() * nodeCount);
    }

    /**
     * Reason payload echoing back the fixed configuration that produced the decision.
     */
    public static class FixedReason implements AutoscalingDecision.Reason {

        private final ByteSizeValue storage;
        private final ByteSizeValue memory;
        private final int nodes;

        public FixedReason(ByteSizeValue storage, ByteSizeValue memory, int nodes) {
            this.storage = storage;
            this.memory = memory;
            this.nodes = nodes;
        }

        public FixedReason(StreamInput in) throws IOException {
            storage = in.readOptionalWriteable(ByteSizeValue::new);
            memory = in.readOptionalWriteable(ByteSizeValue::new);
            nodes = in.readInt();
        }

        @Override
        public String summary() {
            return "fixed storage [" + storage + "] memory [" + memory + "] nodes [" + nodes + "]";
        }

        @Override
        public String getWriteableName() {
            return FixedAutoscalingDeciderConfiguration.NAME;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // keep field order in sync with the StreamInput constructor
            out.writeOptionalWriteable(storage);
            out.writeOptionalWriteable(memory);
            out.writeInt(nodes);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field("storage", storage);
            builder.field("memory", memory);
            builder.field("nodes", nodes);
            builder.endObject();
            return builder;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final FixedReason other = (FixedReason) o;
            return nodes == other.nodes && Objects.equals(storage, other.storage) && Objects.equals(memory, other.memory);
        }

        @Override
        public int hashCode() {
            return Objects.hash(storage, memory, nodes);
        }
    }
}

View File

@ -6,76 +6,105 @@
package org.elasticsearch.xpack.autoscaling;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.autoscaling.decision.AlwaysAutoscalingDeciderConfiguration;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingCapacity;
import org.elasticsearch.xpack.autoscaling.decision.FixedAutoscalingDeciderConfiguration;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDeciderConfiguration;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecision;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecisionType;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecisions;
import org.elasticsearch.xpack.autoscaling.decision.FixedAutoscalingDeciderService;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicy;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicyMetadata;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public abstract class AutoscalingTestCase extends ESTestCase {
public static AutoscalingDecision randomAutoscalingDecision() {
return randomAutoscalingDecisionOfType(randomFrom(AutoscalingDecisionType.values()));
AutoscalingCapacity capacity = randomNullableAutoscalingCapacity();
return randomAutoscalingDecisionWithCapacity(capacity);
}
public static AutoscalingDecision randomAutoscalingDecisionOfType(final AutoscalingDecisionType type) {
return new AutoscalingDecision(randomAlphaOfLength(8), type, randomAlphaOfLength(8));
protected static AutoscalingDecision randomAutoscalingDecisionWithCapacity(AutoscalingCapacity capacity) {
return new AutoscalingDecision(
capacity,
new FixedAutoscalingDeciderService.FixedReason(randomNullableByteSizeValue(), randomNullableByteSizeValue(), randomInt(1000))
);
}
public static AutoscalingDecisions randomAutoscalingDecisions() {
final int numberOfDecisions = 1 + randomIntBetween(1, 8);
final List<AutoscalingDecision> decisions = new ArrayList<>(numberOfDecisions);
for (int i = 0; i < numberOfDecisions; i++) {
decisions.add(randomAutoscalingDecisionOfType(AutoscalingDecisionType.SCALE_DOWN));
}
final int numberOfDownDecisions = randomIntBetween(0, 8);
final int numberOfNoDecisions = randomIntBetween(0, 8);
final int numberOfUpDecisions = randomIntBetween(numberOfDownDecisions + numberOfNoDecisions == 0 ? 1 : 0, 8);
return randomAutoscalingDecisions(numberOfDownDecisions, numberOfNoDecisions, numberOfUpDecisions);
final SortedMap<String, AutoscalingDecision> decisions = IntStream.range(0, randomIntBetween(1, 10))
.mapToObj(i -> Tuple.tuple(Integer.toString(i), randomAutoscalingDecision()))
.collect(Collectors.toMap(Tuple::v1, Tuple::v2, (a, b) -> { throw new IllegalStateException(); }, TreeMap::new));
AutoscalingCapacity capacity = new AutoscalingCapacity(randomAutoscalingResources(), randomAutoscalingResources());
return new AutoscalingDecisions(randomAlphaOfLength(10), capacity, decisions);
}
public static AutoscalingDecisions randomAutoscalingDecisions(
final int numberOfDownDecisions,
final int numberOfNoDecisions,
final int numberOfUpDecisions
) {
final List<AutoscalingDecision> decisions = new ArrayList<>(numberOfDownDecisions + numberOfNoDecisions + numberOfUpDecisions);
for (int i = 0; i < numberOfDownDecisions; i++) {
decisions.add(randomAutoscalingDecisionOfType(AutoscalingDecisionType.SCALE_DOWN));
}
for (int i = 0; i < numberOfNoDecisions; i++) {
decisions.add(randomAutoscalingDecisionOfType(AutoscalingDecisionType.NO_SCALE));
}
for (int i = 0; i < numberOfUpDecisions; i++) {
decisions.add(randomAutoscalingDecisionOfType(AutoscalingDecisionType.SCALE_UP));
}
Randomness.shuffle(decisions);
return new AutoscalingDecisions(decisions);
/**
 * Returns a random capacity: the tier level is always present (with at least one of
 * storage/memory set) and the node level is randomly absent; when present, node
 * resources are only allowed for resource types that are also set at the tier level.
 */
public static AutoscalingCapacity randomAutoscalingCapacity() {
    AutoscalingCapacity.AutoscalingResources tier = randomNullValueAutoscalingResources();
    return new AutoscalingCapacity(
        tier,
        randomBoolean() ? randomNullValueAutoscalingResources(tier.storage() != null, tier.memory() != null) : null
    );
}
/**
 * Randomly returns either a random capacity or {@code null}.
 */
protected static AutoscalingCapacity randomNullableAutoscalingCapacity() {
    if (randomBoolean()) {
        return randomAutoscalingCapacity();
    }
    return null;
}
/**
 * Returns resources with random, non-null storage and memory values.
 */
protected static AutoscalingCapacity.AutoscalingResources randomAutoscalingResources() {
    final ByteSizeValue storage = randomByteSizeValue();
    final ByteSizeValue memory = randomByteSizeValue();
    return new AutoscalingCapacity.AutoscalingResources(storage, memory);
}
// Convenience overload: both storage and memory are allowed to be set (never both null).
private static AutoscalingCapacity.AutoscalingResources randomNullValueAutoscalingResources() {
    return randomNullValueAutoscalingResources(true, true);
}
/**
 * Returns resources where storage and/or memory may be null, but never both.
 *
 * @param allowStorage whether storage may be non-null
 * @param allowMemory  whether memory may be non-null; at least one flag must be true
 */
public static AutoscalingCapacity.AutoscalingResources randomNullValueAutoscalingResources(boolean allowStorage, boolean allowMemory) {
    assert allowMemory || allowStorage;
    // force at least one value: if storage ends up unset, memory is forced on, and vice versa
    boolean addStorage = (allowStorage && randomBoolean()) || allowMemory == false;
    boolean addMemory = (allowMemory && randomBoolean()) || addStorage == false;
    return new AutoscalingCapacity.AutoscalingResources(
        addStorage ? randomByteSizeValue() : null,
        addMemory ? randomByteSizeValue() : null
    );
}
/**
 * Returns a random byte size in {@code [0, Long.MAX_VALUE >> 16]}, leaving headroom
 * so that arithmetic on the value (e.g. multiplying by a node count) cannot overflow.
 */
public static ByteSizeValue randomByteSizeValue() {
    // do not want to test any overflow.
    return new ByteSizeValue(randomLongBetween(0, Long.MAX_VALUE >> 16));
}
/**
 * Randomly returns either a random byte size value or {@code null}.
 */
public static ByteSizeValue randomNullableByteSizeValue() {
    if (randomBoolean()) {
        return randomByteSizeValue();
    }
    return null;
}
public static SortedMap<String, AutoscalingDeciderConfiguration> randomAutoscalingDeciders() {
return new TreeMap<>(
org.elasticsearch.common.collect.List.of(new AlwaysAutoscalingDeciderConfiguration())
org.elasticsearch.common.collect.List.of(randomFixedDecider())
.stream()
.collect(Collectors.toMap(AutoscalingDeciderConfiguration::name, Function.identity()))
);
}
/**
 * Returns a fixed decider configuration where storage, memory and node count
 * are each independently either a random value or {@code null}.
 */
public static FixedAutoscalingDeciderConfiguration randomFixedDecider() {
    return new FixedAutoscalingDeciderConfiguration(
        randomNullableByteSizeValue(),
        randomNullableByteSizeValue(),
        randomFrom(randomInt(1000), null)
    );
}
/**
 * Returns a random policy with a random 8-character name.
 */
public static AutoscalingPolicy randomAutoscalingPolicy() {
    return randomAutoscalingPolicyOfName(randomAlphaOfLength(8));
}
@ -130,5 +159,4 @@ public abstract class AutoscalingTestCase extends ESTestCase {
/**
 * Returns an XContent registry populated with the autoscaling plugin's named XContent entries.
 */
public static NamedXContentRegistry getAutoscalingXContentRegistry() {
    return new NamedXContentRegistry(new Autoscaling(Settings.EMPTY).getNamedXContent());
}
}

View File

@ -44,9 +44,7 @@ public class GetAutoscalingDecisionActionResponseTests extends AutoscalingTestCa
expected.startObject();
expected.startArray("decisions");
for (Map.Entry<String, AutoscalingDecisions> entry : decisions.entrySet()) {
expected.startObject();
expected.field(entry.getKey(), entry.getValue());
expected.endObject();
entry.getValue().toXContent(expected, null);
}
expected.endArray();
expected.endObject();

View File

@ -6,8 +6,10 @@
package org.elasticsearch.xpack.autoscaling.action;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import org.elasticsearch.xpack.autoscaling.decision.AutoscalingDecisions;
import java.util.Collections;
@ -19,6 +21,11 @@ import static org.elasticsearch.xpack.autoscaling.AutoscalingTestCase.randomAuto
public class GetAutoscalingDecisionActionResponseWireSerializingTests extends AbstractWireSerializingTestCase<
GetAutoscalingDecisionAction.Response> {
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return AutoscalingTestCase.getAutoscalingNamedWriteableRegistry();
}
@Override
protected Writeable.Reader<GetAutoscalingDecisionAction.Response> instanceReader() {
return GetAutoscalingDecisionAction.Response::new;

View File

@ -0,0 +1,75 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import java.io.IOException;
/**
 * Wire serialization round-trip tests for {@link AutoscalingCapacity}.
 */
public class AutoscalingCapacityWireSerializationTests extends AbstractWireSerializingTestCase<AutoscalingCapacity> {

    @Override
    protected Writeable.Reader<AutoscalingCapacity> instanceReader() {
        return AutoscalingCapacity::new;
    }

    @Override
    protected AutoscalingCapacity createTestInstance() {
        return AutoscalingTestCase.randomAutoscalingCapacity();
    }

    @Override
    protected AutoscalingCapacity mutateInstance(AutoscalingCapacity instance) throws IOException {
        // start from a copy of the instance and change exactly one of the tier/node levels
        AutoscalingCapacity.Builder builder = AutoscalingCapacity.builder().capacity(instance);
        if (randomBoolean()) {
            // mutate tier
            boolean hasBothStorageAndMemory = instance.tier().memory() != null && instance.tier().storage() != null;
            if (randomBoolean()) {
                // tier storage may only become null when memory remains set and
                // the node level does not itself carry a storage value
                builder.tier(
                    randomByteSize(
                        hasBothStorageAndMemory && (instance.node() == null || instance.node().storage() == null),
                        instance.tier().storage()
                    ),
                    instance.tier().memory()
                );
            } else {
                builder.tier(
                    instance.tier().storage(),
                    randomByteSize(
                        hasBothStorageAndMemory && (instance.node() == null || instance.node().memory() == null),
                        instance.tier().memory()
                    )
                );
            }
        } else {
            // mutate node
            if (instance.node() == null) {
                // add a node level restricted to resource types present at the tier level
                builder.node(
                    AutoscalingTestCase.randomNullValueAutoscalingResources(
                        instance.tier().storage() != null,
                        instance.tier().memory() != null
                    )
                );
            } else if (randomBoolean() && instance.tier().storage() != null || instance.tier().memory() == null) {
                // NOTE(review): precedence here is (randomBoolean() && tier.storage != null) || tier.memory == null
                // — confirm this mix of random choice and forced branch is intended
                builder.node(randomByteSize(instance.node().memory() != null, instance.node().storage()), instance.node().memory());
            } else {
                builder.node(instance.node().storage(), randomByteSize(instance.node().storage() != null, instance.node().memory()));
            }
        }
        return builder.build();
    }

    // returns a random ByteSizeValue different from original; null is a possible result only when allowNull
    private static ByteSizeValue randomByteSize(boolean allowNull, ByteSizeValue original) {
        return randomValueOtherThan(
            original,
            allowNull ? AutoscalingTestCase::randomNullableByteSizeValue : AutoscalingTestCase::randomByteSizeValue
        );
    }
}

View File

@ -6,60 +6,153 @@
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.xpack.autoscaling.AutoscalingMetadata;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicy;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicyMetadata;
import org.hamcrest.Matchers;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.hamcrest.Matchers.equalTo;
public class AutoscalingDecisionServiceTests extends AutoscalingTestCase {
public void testAlwaysDecision() {
public void testMultiplePoliciesFixedDecision() {
AutoscalingDecisionService service = new AutoscalingDecisionService(
org.elasticsearch.common.collect.Set.of(new AlwaysAutoscalingDeciderService())
org.elasticsearch.common.collect.Set.of(new FixedAutoscalingDeciderService())
);
Set<String> policyNames = IntStream.range(0, randomIntBetween(1, 10))
.mapToObj(i -> randomAlphaOfLength(10))
.mapToObj(i -> "test_ " + randomAlphaOfLength(10))
.collect(Collectors.toSet());
SortedMap<String, AutoscalingDeciderConfiguration> deciders = new TreeMap<>(
org.elasticsearch.common.collect.Map.of(AlwaysAutoscalingDeciderConfiguration.NAME, new AlwaysAutoscalingDeciderConfiguration())
);
SortedMap<String, AutoscalingPolicyMetadata> policies = new TreeMap<>(
policyNames.stream()
.map(s -> Tuple.tuple(s, new AutoscalingPolicyMetadata(new AutoscalingPolicy(s, deciders))))
.map(s -> Tuple.tuple(s, new AutoscalingPolicyMetadata(new AutoscalingPolicy(s, randomFixedDeciders()))))
.collect(Collectors.toMap(Tuple::v1, Tuple::v2))
);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.metadata(Metadata.builder().putCustom(AutoscalingMetadata.NAME, new AutoscalingMetadata(policies)))
.build();
SortedMap<String, AutoscalingDecisions> decisions = service.decide(state);
SortedMap<String, AutoscalingDecisions> expected = new TreeMap<>(
policyNames.stream()
.map(
s -> Tuple.tuple(
s,
new AutoscalingDecisions(
org.elasticsearch.common.collect.List.of(
new AutoscalingDecision(
AlwaysAutoscalingDeciderConfiguration.NAME,
AutoscalingDecisionType.SCALE_UP,
"always"
)
)
)
)
SortedMap<String, AutoscalingDecisions> decisions = service.decide(state, new ClusterInfo() {
});
assertThat(decisions.keySet(), equalTo(policyNames));
for (Map.Entry<String, AutoscalingDecisions> entry : decisions.entrySet()) {
AutoscalingDecisions decision = entry.getValue();
assertThat(decision.tier(), equalTo(entry.getKey()));
SortedMap<String, AutoscalingDeciderConfiguration> deciders = policies.get(decision.tier()).policy().deciders();
assertThat(deciders.size(), equalTo(1));
FixedAutoscalingDeciderConfiguration configuration = (FixedAutoscalingDeciderConfiguration) deciders.values().iterator().next();
AutoscalingCapacity requiredCapacity = calculateFixedDecisionCapacity(configuration);
assertThat(decision.requiredCapacity(), equalTo(requiredCapacity));
assertThat(decision.decisions().size(), equalTo(1));
AutoscalingDecision deciderDecision = decision.decisions().get(deciders.firstKey());
assertNotNull(deciderDecision);
assertThat(deciderDecision.requiredCapacity(), equalTo(requiredCapacity));
ByteSizeValue storage = configuration.storage();
ByteSizeValue memory = configuration.memory();
int nodes = configuration.nodes();
assertThat(deciderDecision.reason(), equalTo(new FixedAutoscalingDeciderService.FixedReason(storage, memory, nodes)));
assertThat(
deciderDecision.reason().summary(),
equalTo("fixed storage [" + storage + "] memory [" + memory + "] nodes [" + nodes + "]")
);
// there is no nodes in any tier.
assertThat(decision.currentCapacity(), equalTo(AutoscalingCapacity.ZERO));
}
}
private SortedMap<String, AutoscalingDeciderConfiguration> randomFixedDeciders() {
return new TreeMap<>(
org.elasticsearch.common.collect.Map.of(
FixedAutoscalingDeciderConfiguration.NAME,
new FixedAutoscalingDeciderConfiguration(
randomNullableByteSizeValue(),
randomNullableByteSizeValue(),
randomIntBetween(1, 10)
)
.collect(Collectors.toMap(Tuple::v1, Tuple::v2))
)
);
assertThat(decisions, Matchers.equalTo(expected));
}
/**
 * Mirrors the fixed decider's expected capacity calculation: per-node resources taken
 * straight from the configuration, tier totals scaled by node count, and {@code null}
 * when neither storage nor memory is configured.
 */
private AutoscalingCapacity calculateFixedDecisionCapacity(FixedAutoscalingDeciderConfiguration configuration) {
    ByteSizeValue totalStorage = configuration.storage() != null
        ? new ByteSizeValue(configuration.storage().getBytes() * configuration.nodes())
        : null;
    ByteSizeValue totalMemory = configuration.memory() != null
        ? new ByteSizeValue(configuration.memory().getBytes() * configuration.nodes())
        : null;
    if (totalStorage == null && totalMemory == null) {
        // no requirement expressed -> decider yields no required capacity
        return null;
    } else {
        return new AutoscalingCapacity(
            new AutoscalingCapacity.AutoscalingResources(totalStorage, totalMemory),
            new AutoscalingCapacity.AutoscalingResources(configuration.storage(), configuration.memory())
        );
    }
}
/**
 * Exercises DecisionAutoscalingDeciderContext's current-capacity calculation against
 * cluster states with no nodes, a node without disk usage info, and nodes with usages.
 */
public void testContext() {
    String tier = randomAlphaOfLength(5);
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build();
    ClusterInfo info = ClusterInfo.EMPTY;
    AutoscalingDecisionService.DecisionAutoscalingDeciderContext context =
        new AutoscalingDecisionService.DecisionAutoscalingDeciderContext(tier, state, info);
    assertSame(state, context.state());
    // there is no nodes in any tier.
    assertThat(context.currentCapacity(), equalTo(AutoscalingCapacity.ZERO));
    tier = "data";
    state = ClusterState.builder(ClusterName.DEFAULT)
        .nodes(DiscoveryNodes.builder().add(new DiscoveryNode("nodeId", buildNewFakeTransportAddress(), Version.CURRENT)))
        .build();
    context = new AutoscalingDecisionService.DecisionAutoscalingDeciderContext(tier, state, info);
    // NOTE(review): presumably null because ClusterInfo.EMPTY has no disk usage for the node,
    // so the current capacity cannot be determined — confirm against the context implementation
    assertNull(context.currentCapacity());
    ImmutableOpenMap.Builder<String, DiskUsage> leastUsages = ImmutableOpenMap.<String, DiskUsage>builder();
    ImmutableOpenMap.Builder<String, DiskUsage> mostUsages = ImmutableOpenMap.<String, DiskUsage>builder();
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
    long sumTotal = 0;
    long maxTotal = 0;
    // NOTE(review): randomIntBetween is re-evaluated on every iteration of the loop condition,
    // so the iteration count distribution is skewed (still >= 1 and terminating) — confirm intended
    for (int i = 0; i < randomIntBetween(1, 5); ++i) {
        String nodeId = "nodeId" + i;
        nodes.add(new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), Version.CURRENT));
        long total = randomLongBetween(1, 1L << 40);
        // exactly one of total1/total2 equals total, so max(total1, total2) == total per node
        long total1 = randomBoolean() ? total : randomLongBetween(0, total);
        long total2 = total1 != total ? total : randomLongBetween(0, total);
        leastUsages.fPut(nodeId, new DiskUsage(nodeId, null, null, total1, randomLongBetween(0, total)));
        mostUsages.fPut(nodeId, new DiskUsage(nodeId, null, null, total2, randomLongBetween(0, total)));
        sumTotal += total;
        maxTotal = Math.max(total, maxTotal);
    }
    state = ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).build();
    info = new ClusterInfo(leastUsages.build(), mostUsages.build(), null, null, null);
    context = new AutoscalingDecisionService.DecisionAutoscalingDeciderContext(tier, state, info);
    AutoscalingCapacity capacity = context.currentCapacity();
    // node-level storage is the largest single node total; tier-level is the sum over all nodes
    assertThat(capacity.node().storage(), equalTo(new ByteSizeValue(maxTotal)));
    assertThat(capacity.tier().storage(), equalTo(new ByteSizeValue(sumTotal)));
    // todo: fix these once we know memory of all node on master.
    assertThat(capacity.node().memory(), equalTo(ByteSizeValue.ZERO));
    assertThat(capacity.tier().memory(), equalTo(ByteSizeValue.ZERO));
}
}

View File

@ -1,32 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import java.io.IOException;
import static org.hamcrest.Matchers.equalTo;
/**
 * Tests for the decision type carried by {@link AutoscalingDecision}.
 */
public class AutoscalingDecisionTests extends AutoscalingTestCase {

    public void testAutoscalingDecisionType() {
        // a decision built for a given type reports back that same type
        final AutoscalingDecisionType type = randomFrom(AutoscalingDecisionType.values());
        final AutoscalingDecision decision = randomAutoscalingDecisionOfType(type);
        assertThat(decision.type(), equalTo(type));
    }

    public void testAutoscalingDecisionTypeSerialization() throws IOException {
        // round-trip a decision type through the wire format
        final AutoscalingDecisionType before = randomFrom(AutoscalingDecisionType.values());
        final BytesStreamOutput out = new BytesStreamOutput();
        before.writeTo(out);
        final AutoscalingDecisionType after = AutoscalingDecisionType.readFrom(out.bytes().streamInput());
        assertThat(after, equalTo(before));
    }
}

View File

@ -1,54 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.equalTo;
/**
 * Wire serialization tests for {@link AutoscalingDecisionType}, including the rejection
 * of byte values that do not map to any decision type.
 */
public class AutoscalingDecisionTypeWireSerializingTests extends AbstractWireSerializingTestCase<AutoscalingDecisionType> {

    @Override
    protected Writeable.Reader<AutoscalingDecisionType> instanceReader() {
        return AutoscalingDecisionType::readFrom;
    }

    @Override
    protected AutoscalingDecisionType createTestInstance() {
        return randomFrom(AutoscalingDecisionType.values());
    }

    @Override
    protected void assertEqualInstances(final AutoscalingDecisionType expectedInstance, final AutoscalingDecisionType newInstance) {
        // enum constants round-trip to the identical instance, not just an equal one
        assertSame(expectedInstance, newInstance);
        assertEquals(expectedInstance, newInstance);
        assertEquals(expectedInstance.hashCode(), newInstance.hashCode());
    }

    public void testInvalidAutoscalingDecisionTypeSerialization() throws IOException {
        final BytesStreamOutput out = new BytesStreamOutput();
        // pick a byte that is not the id of any known decision type
        final Set<Byte> values = Arrays.stream(AutoscalingDecisionType.values())
            .map(AutoscalingDecisionType::id)
            .collect(Collectors.toSet());
        final byte value = randomValueOtherThanMany(values::contains, ESTestCase::randomByte);
        out.writeByte(value);
        final IllegalArgumentException e = expectThrows(
            IllegalArgumentException.class,
            () -> AutoscalingDecisionType.readFrom(out.bytes().streamInput())
        );
        assertThat(e.getMessage(), equalTo("unexpected value [" + value + "] for autoscaling decision type"));
    }
}

View File

@ -6,12 +6,18 @@
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
public class AutoscalingDecisionWireSerializingTests extends AbstractWireSerializingTestCase<AutoscalingDecision> {
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return AutoscalingTestCase.getAutoscalingNamedWriteableRegistry();
}
@Override
protected Writeable.Reader<AutoscalingDecision> instanceReader() {
return AutoscalingDecision::new;

View File

@ -6,31 +6,104 @@
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.hamcrest.Matchers.equalTo;
// NOTE(review): this region is a rendered diff with removed (old) and added (new) lines
// interleaved and no +/- markers, so some methods appear twice or lack closing braces.
// The code below preserves that residue byte-for-byte; only comments are added.
public class AutoscalingDecisionsTests extends AutoscalingTestCase {
// Constructing an AutoscalingDecisions with no per-decider decisions must be rejected.
public void testAutoscalingDecisionsRejectsEmptyDecisions() {
// NOTE(review): the next line is the pre-change (removed) version of this call; the
// multi-line call after it is the new version taking a tier name, current capacity and decision map.
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new AutoscalingDecisions(List.of()));
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> new AutoscalingDecisions(
randomAlphaOfLength(10),
new AutoscalingCapacity(randomAutoscalingResources(), randomAutoscalingResources()),
new TreeMap<>()
)
);
assertThat(e.getMessage(), equalTo("decisions can not be empty"));
}
// NOTE(review): removed test from the old type-based decision API, interleaved with the
// new testRequiredCapacity below; the missing closing brace is a diff artifact.
public void testAutoscalingDecisionsTypeDown() {
final AutoscalingDecisions decisions = randomAutoscalingDecisions(randomIntBetween(1, 8), 0, 0);
assertThat(decisions.type(), equalTo(AutoscalingDecisionType.SCALE_DOWN));
// New test: requiredCapacity() over a set of decider capacities.
public void testRequiredCapacity() {
AutoscalingCapacity single = randomBoolean() ? randomAutoscalingCapacity() : null;
verifyRequiredCapacity(single, single);
// any undecided decider nulls out any decision making
verifyRequiredCapacity(null, single, null);
verifyRequiredCapacity(null, null, single);
boolean node = randomBoolean();
boolean storage = randomBoolean();
// ensure at least one of storage/memory is exercised
boolean memory = randomBoolean() || storage == false;
AutoscalingCapacity large = randomCapacity(node, storage, memory, 1000, 2000);
List<AutoscalingCapacity> autoscalingCapacities = new ArrayList<>();
autoscalingCapacities.add(large);
// ten capacities drawn from [0, 1000] so `large` (drawn from [1000, 2000]) dominates
IntStream.range(0, 10).mapToObj(i -> randomCapacity(node, storage, memory, 0, 1000)).forEach(autoscalingCapacities::add);
Randomness.shuffle(autoscalingCapacities);
verifyRequiredCapacity(large, autoscalingCapacities.toArray(new AutoscalingCapacity[0]));
// a capacity larger on only one metric should win only on that metric
AutoscalingCapacity largerStorage = randomCapacity(node, true, false, 2000, 3000);
verifySingleMetricLarger(node, largerStorage, large, autoscalingCapacities, largerStorage);
AutoscalingCapacity largerMemory = randomCapacity(node, false, true, 2000, 3000);
verifySingleMetricLarger(node, large, largerMemory, autoscalingCapacities, largerMemory);
}
// NOTE(review): removed test (old NO_SCALE type assertion) interleaved with the new helper below.
public void testAutoscalingDecisionsTypeNo() {
final AutoscalingDecisions decision = randomAutoscalingDecisions(randomIntBetween(0, 8), randomIntBetween(1, 8), 0);
assertThat(decision.type(), equalTo(AutoscalingDecisionType.NO_SCALE));
// Verifies that requiredCapacity takes storage from one capacity and memory from another
// when each is the per-metric maximum of the supplied capacities.
private void verifySingleMetricLarger(
boolean node,
AutoscalingCapacity expectedStorage,
AutoscalingCapacity expectedMemory,
List<AutoscalingCapacity> other,
AutoscalingCapacity larger
) {
List<AutoscalingCapacity> autoscalingCapacities = new ArrayList<>(other);
autoscalingCapacities.add(larger);
// shuffle so the result cannot depend on input ordering
Randomness.shuffle(autoscalingCapacities);
AutoscalingCapacity.Builder expectedBuilder = AutoscalingCapacity.builder()
.tier(expectedStorage.tier().storage(), expectedMemory.tier().memory());
if (node) {
expectedBuilder.node(expectedStorage.node().storage(), expectedMemory.node().memory());
}
verifyRequiredCapacity(expectedBuilder.build(), autoscalingCapacities.toArray(new AutoscalingCapacity[0]));
}
// NOTE(review): removed test (old SCALE_UP type assertion) interleaved with the new helper below.
public void testAutoscalingDecisionsTypeUp() {
final AutoscalingDecisions decision = randomAutoscalingDecisions(0, 0, randomIntBetween(1, 8));
assertThat(decision.type(), equalTo(AutoscalingDecisionType.SCALE_UP));
// Builds an AutoscalingDecisions holding one decision per supplied capacity and asserts
// that requiredCapacity() equals the expected aggregate.
private void verifyRequiredCapacity(AutoscalingCapacity expected, AutoscalingCapacity... capacities) {
AtomicInteger uniqueGenerator = new AtomicInteger();
SortedMap<String, AutoscalingDecision> decisions = Arrays.stream(capacities)
.map(AutoscalingDecisionsTests::randomAutoscalingDecisionWithCapacity)
.collect(
Collectors.toMap(
// counter suffix guarantees unique map keys even if the random names collide
k -> randomAlphaOfLength(10) + "-" + uniqueGenerator.incrementAndGet(),
Function.identity(),
(a, b) -> { throw new UnsupportedOperationException(); },
TreeMap::new
)
);
assertThat(
new AutoscalingDecisions(randomAlphaOfLength(10), randomAutoscalingCapacity(), decisions).requiredCapacity(),
equalTo(expected)
);
}
// Builds a random capacity with the requested dimensions; a null value means
// "not specified" for that resource, and node-level capacity is optional.
private AutoscalingCapacity randomCapacity(boolean node, boolean storage, boolean memory, int lower, int upper) {
AutoscalingCapacity.Builder builder = AutoscalingCapacity.builder();
builder.tier(storage ? randomLongBetween(lower, upper) : null, memory ? randomLongBetween(lower, upper) : null);
if (node) {
builder.node(storage ? randomLongBetween(lower, upper) : null, memory ? randomLongBetween(lower, upper) : null);
}
return builder.build();
}
}

View File

@@ -6,12 +6,18 @@
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
public class AutoscalingDecisionsWireSerializingTests extends AbstractWireSerializingTestCase<AutoscalingDecisions> {
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
    // the shared autoscaling registry knows all autoscaling named writeables needed for round-trips
    final NamedWriteableRegistry registry = AutoscalingTestCase.getAutoscalingNamedWriteableRegistry();
    return registry;
}
@Override
protected Writeable.Reader<AutoscalingDecisions> instanceReader() {
return AutoscalingDecisions::new;

View File

@@ -0,0 +1,53 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import java.io.IOException;
// Round-trip (wire and XContent) serialization tests for the fixed decider configuration.
public class FixedAutoscalingDeciderConfigurationSerializationTests extends AbstractSerializingTestCase<
    FixedAutoscalingDeciderConfiguration> {

    @Override
    protected FixedAutoscalingDeciderConfiguration doParseInstance(XContentParser parser) throws IOException {
        // delegate to the production parser so XContent round-trips exercise real parsing code
        return FixedAutoscalingDeciderConfiguration.parse(parser);
    }

    @Override
    protected Writeable.Reader<FixedAutoscalingDeciderConfiguration> instanceReader() {
        return FixedAutoscalingDeciderConfiguration::new;
    }

    @Override
    protected FixedAutoscalingDeciderConfiguration createTestInstance() {
        return AutoscalingTestCase.randomFixedDecider();
    }

    @Override
    protected FixedAutoscalingDeciderConfiguration mutateInstance(FixedAutoscalingDeciderConfiguration instance) throws IOException {
        // start from the current values, then change exactly one of the three fields
        ByteSizeValue storage = instance.storage();
        ByteSizeValue memory = instance.memory();
        Integer nodes = instance.nodes();
        final int mutatedField = randomInt(2);
        if (mutatedField == 0) {
            storage = randomValueOtherThan(storage, AutoscalingTestCase::randomNullableByteSizeValue);
        } else if (mutatedField == 1) {
            memory = randomValueOtherThan(memory, AutoscalingTestCase::randomNullableByteSizeValue);
        } else {
            // nodes may also be absent (null) in a valid configuration
            nodes = randomValueOtherThan(nodes, () -> randomBoolean() ? randomIntBetween(1, 1000) : null);
        }
        return new FixedAutoscalingDeciderConfiguration(storage, memory, nodes);
    }
}

View File

@@ -0,0 +1,45 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.autoscaling.decision;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import org.hamcrest.Matchers;
// Tests that the fixed decider reports exactly the configured capacity.
public class FixedAutoscalingDeciderServiceTests extends AutoscalingTestCase {

    public void testScale() {
        // no storage/memory configured: the decider reports no required capacity, regardless of nodes
        final FixedAutoscalingDeciderConfiguration noResources = new FixedAutoscalingDeciderConfiguration(
            null,
            null,
            randomFrom(randomIntBetween(1, 1000), null)
        );
        verify(noResources, null);

        // with resources configured but no node count, node and tier capacity both mirror the configured values;
        // make sure at least one of storage/memory is non-null
        final ByteSizeValue storage = randomNullableByteSizeValue();
        final ByteSizeValue memory;
        if (storage != null) {
            memory = randomNullableByteSizeValue();
        } else {
            memory = randomByteSizeValue();
        }
        final AutoscalingCapacity singleNode = AutoscalingCapacity.builder().node(storage, memory).tier(storage, memory).build();
        verify(new FixedAutoscalingDeciderConfiguration(storage, memory, null), singleNode);

        // with an explicit node count, tier capacity scales linearly with the number of nodes
        final int nodes = randomIntBetween(1, 1000);
        final AutoscalingCapacity scaledTier = AutoscalingCapacity.builder()
            .node(storage, memory)
            .tier(multiply(storage, nodes), multiply(memory, nodes))
            .build();
        verify(new FixedAutoscalingDeciderConfiguration(storage, memory, nodes), scaledTier);
    }

    // Runs the decider on the given configuration and asserts the capacity it reports.
    private void verify(FixedAutoscalingDeciderConfiguration configuration, AutoscalingCapacity expected) {
        final FixedAutoscalingDeciderService service = new FixedAutoscalingDeciderService();
        final AutoscalingDecision decision = service.scale(configuration, null);
        assertThat(decision.requiredCapacity(), Matchers.equalTo(expected));
    }

    // Null-safe multiplication of a byte size by a node count.
    private ByteSizeValue multiply(ByteSizeValue bytes, int nodes) {
        if (bytes == null) {
            return null;
        }
        return new ByteSizeValue(bytes.getBytes() * nodes);
    }
}