diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java
index cee7fbb1807..eef0e5ae15d 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java
@@ -20,7 +20,6 @@ import org.elasticsearch.common.cache.RemovalNotification;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
@@ -57,8 +56,8 @@ public class ModelLoadingService implements ClusterStateListener {
      * Once the limit is reached, LRU models are evicted in favor of new models
      */
     public static final Setting<ByteSizeValue> INFERENCE_MODEL_CACHE_SIZE =
-        Setting.byteSizeSetting("xpack.ml.inference_model.cache_size",
-            new ByteSizeValue(1, ByteSizeUnit.GB),
+        Setting.memorySizeSetting("xpack.ml.inference_model.cache_size",
+            "40%",
             Setting.Property.NodeScope);

     /**