[Infra] remove indicesLifecycle.Listener from IndexingMemoryController
The IndexingMemoryController determines the amount of indexing buffer size and translog buffer size each shard should have. It takes memory from inactive shards (indexing wise) and assigns it to other shards. To do so it needs to know about the addition and closing of shards. The current implementation hooks into the indicesService.indicesLifecycle() mechanism to receive callbacks, such as a shard entering the POST_RECOVERY state. Those callbacks are typically run on the thread that actually made the change. A mutex was used to synchronize those callbacks with IndexingMemoryController's background thread, which updates the internal engines' memory usage on a regular interval. This introduced a dependency between those threads and the locks of the internal engines hosted on the node. In a *very* rare situation (two test runs locally) this can cause recovery timeouts where two nodes are recovering replicas from each other. This commit introduces a lock-free approach that updates the internal data structures during iterations in the background thread. Closes #6892
This commit is contained in:
parent
9714dd55c2
commit
38d8e3ccc2
|
@ -20,7 +20,6 @@
|
||||||
package org.elasticsearch.indices.memory;
|
package org.elasticsearch.indices.memory;
|
||||||
|
|
||||||
import com.google.common.collect.Lists;
|
import com.google.common.collect.Lists;
|
||||||
import com.google.common.collect.Maps;
|
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||||
import org.elasticsearch.common.inject.Inject;
|
import org.elasticsearch.common.inject.Inject;
|
||||||
|
@ -37,16 +36,12 @@ import org.elasticsearch.index.shard.ShardId;
|
||||||
import org.elasticsearch.index.shard.service.IndexShard;
|
import org.elasticsearch.index.shard.service.IndexShard;
|
||||||
import org.elasticsearch.index.shard.service.InternalIndexShard;
|
import org.elasticsearch.index.shard.service.InternalIndexShard;
|
||||||
import org.elasticsearch.index.translog.Translog;
|
import org.elasticsearch.index.translog.Translog;
|
||||||
import org.elasticsearch.indices.IndicesLifecycle;
|
|
||||||
import org.elasticsearch.indices.IndicesService;
|
import org.elasticsearch.indices.IndicesService;
|
||||||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||||
import org.elasticsearch.threadpool.ThreadPool;
|
import org.elasticsearch.threadpool.ThreadPool;
|
||||||
|
|
||||||
import java.util.EnumSet;
|
import java.util.*;
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.concurrent.ScheduledFuture;
|
import java.util.concurrent.ScheduledFuture;
|
||||||
import java.util.concurrent.atomic.AtomicBoolean;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
|
@ -66,16 +61,9 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
|
|
||||||
private final TimeValue inactiveTime;
|
private final TimeValue inactiveTime;
|
||||||
private final TimeValue interval;
|
private final TimeValue interval;
|
||||||
private final AtomicBoolean shardsRecoveredOrDeleted = new AtomicBoolean();
|
|
||||||
|
|
||||||
private final Listener listener = new Listener();
|
|
||||||
|
|
||||||
private final Map<ShardId, ShardIndexingStatus> shardsIndicesStatus = Maps.newHashMap();
|
|
||||||
|
|
||||||
private volatile ScheduledFuture scheduler;
|
private volatile ScheduledFuture scheduler;
|
||||||
|
|
||||||
private final Object mutex = new Object();
|
|
||||||
|
|
||||||
private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of(IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
|
private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of(IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
|
||||||
|
|
||||||
@Inject
|
@Inject
|
||||||
|
@ -137,14 +125,12 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doStart() throws ElasticsearchException {
|
protected void doStart() throws ElasticsearchException {
|
||||||
indicesService.indicesLifecycle().addListener(listener);
|
|
||||||
// its fine to run it on the scheduler thread, no busy work
|
// its fine to run it on the scheduler thread, no busy work
|
||||||
this.scheduler = threadPool.scheduleWithFixedDelay(new ShardsIndicesStatusChecker(), interval);
|
this.scheduler = threadPool.scheduleWithFixedDelay(new ShardsIndicesStatusChecker(), interval);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doStop() throws ElasticsearchException {
|
protected void doStop() throws ElasticsearchException {
|
||||||
indicesService.indicesLifecycle().removeListener(listener);
|
|
||||||
if (scheduler != null) {
|
if (scheduler != null) {
|
||||||
scheduler.cancel(false);
|
scheduler.cancel(false);
|
||||||
scheduler = null;
|
scheduler = null;
|
||||||
|
@ -164,49 +150,18 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
}
|
}
|
||||||
|
|
||||||
class ShardsIndicesStatusChecker implements Runnable {
|
class ShardsIndicesStatusChecker implements Runnable {
|
||||||
|
|
||||||
|
private final Map<ShardId, ShardIndexingStatus> shardsIndicesStatus = new HashMap<>();
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void run() {
|
public void run() {
|
||||||
synchronized (mutex) {
|
EnumSet<ShardStatusChangeType> changes = EnumSet.noneOf(ShardStatusChangeType.class);
|
||||||
boolean activeInactiveStatusChanges = false;
|
|
||||||
List<IndexShard> activeToInactiveIndexingShards = Lists.newArrayList();
|
changes.addAll(purgeDeletedAndClosedShards());
|
||||||
List<IndexShard> inactiveToActiveIndexingShards = Lists.newArrayList();
|
|
||||||
for (IndexService indexService : indicesService) {
|
final List<IndexShard> activeToInactiveIndexingShards = Lists.newArrayList();
|
||||||
for (IndexShard indexShard : indexService) {
|
final int activeShards = updateShardStatuses(changes, activeToInactiveIndexingShards);
|
||||||
long time = threadPool.estimatedTimeInMillis();
|
|
||||||
Translog translog = ((InternalIndexShard) indexShard).translog();
|
|
||||||
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
|
|
||||||
if (status == null) { // not added yet
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
// check if it is deemed to be inactive (same translogId and numberOfOperations over a long period of time)
|
|
||||||
if (status.translogId == translog.currentId() && translog.estimatedNumberOfOperations() == 0) {
|
|
||||||
if (status.time == -1) { // first time
|
|
||||||
status.time = time;
|
|
||||||
}
|
|
||||||
// inactive?
|
|
||||||
if (!status.inactiveIndexing) {
|
|
||||||
// mark it as inactive only if enough time has passed and there are no ongoing merges going on...
|
|
||||||
if ((time - status.time) > inactiveTime.millis() && indexShard.mergeStats().getCurrent() == 0) {
|
|
||||||
// inactive for this amount of time, mark it
|
|
||||||
activeToInactiveIndexingShards.add(indexShard);
|
|
||||||
status.inactiveIndexing = true;
|
|
||||||
activeInactiveStatusChanges = true;
|
|
||||||
logger.debug("marking shard [{}][{}] as inactive (inactive_time[{}]) indexing wise, setting size to [{}]", indexShard.shardId().index().name(), indexShard.shardId().id(), inactiveTime, Engine.INACTIVE_SHARD_INDEXING_BUFFER);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (status.inactiveIndexing) {
|
|
||||||
inactiveToActiveIndexingShards.add(indexShard);
|
|
||||||
status.inactiveIndexing = false;
|
|
||||||
activeInactiveStatusChanges = true;
|
|
||||||
logger.debug("marking shard [{}][{}] as active indexing wise", indexShard.shardId().index().name(), indexShard.shardId().id());
|
|
||||||
}
|
|
||||||
status.time = -1;
|
|
||||||
}
|
|
||||||
status.translogId = translog.currentId();
|
|
||||||
status.translogNumberOfOperations = translog.estimatedNumberOfOperations();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (IndexShard indexShard : activeToInactiveIndexingShards) {
|
for (IndexShard indexShard : activeToInactiveIndexingShards) {
|
||||||
// update inactive indexing buffer size
|
// update inactive indexing buffer size
|
||||||
try {
|
try {
|
||||||
|
@ -218,40 +173,110 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
// ignore
|
// ignore
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
boolean shardsRecoveredOrDeleted = IndexingMemoryController.this.shardsRecoveredOrDeleted.compareAndSet(true, false);
|
if (!changes.isEmpty()) {
|
||||||
if (shardsRecoveredOrDeleted || activeInactiveStatusChanges) {
|
calcAndSetShardBuffers(activeShards, "[" + changes + "]");
|
||||||
calcAndSetShardBuffers("active/inactive[" + activeInactiveStatusChanges + "] recovered/deleted[" + shardsRecoveredOrDeleted + "]");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
class Listener extends IndicesLifecycle.Listener {
|
/**
|
||||||
|
* goes through all existing shards and checks whether they changed their active status
|
||||||
|
*
|
||||||
|
* @return the current count of active shards
|
||||||
|
*/
|
||||||
|
private int updateShardStatuses(EnumSet<ShardStatusChangeType> changes, List<IndexShard> activeToInactiveIndexingShards) {
|
||||||
|
int activeShards = 0;
|
||||||
|
for (IndexService indexService : indicesService) {
|
||||||
|
for (IndexShard indexShard : indexService) {
|
||||||
|
|
||||||
@Override
|
if (!CAN_UPDATE_INDEX_BUFFER_STATES.contains(indexShard.state())) {
|
||||||
public void afterIndexShardPostRecovery(IndexShard indexShard) {
|
// not ready to be updated yet.
|
||||||
synchronized (mutex) {
|
continue;
|
||||||
shardsIndicesStatus.put(indexShard.shardId(), new ShardIndexingStatus());
|
|
||||||
shardsRecoveredOrDeleted.set(true);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
final long time = threadPool.estimatedTimeInMillis();
|
||||||
public void afterIndexShardClosed(ShardId shardId) {
|
|
||||||
synchronized (mutex) {
|
Translog translog = ((InternalIndexShard) indexShard).translog();
|
||||||
shardsIndicesStatus.remove(shardId);
|
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
|
||||||
shardsRecoveredOrDeleted.set(true);
|
if (status == null) {
|
||||||
|
status = new ShardIndexingStatus();
|
||||||
|
shardsIndicesStatus.put(indexShard.shardId(), status);
|
||||||
|
changes.add(ShardStatusChangeType.ADDED);
|
||||||
|
}
|
||||||
|
// check if it is deemed to be inactive (same translogId and numberOfOperations over a long period of time)
|
||||||
|
if (status.translogId == translog.currentId() && translog.estimatedNumberOfOperations() == 0) {
|
||||||
|
if (status.time == -1) { // first time
|
||||||
|
status.time = time;
|
||||||
|
}
|
||||||
|
// inactive?
|
||||||
|
if (status.activeIndexing) {
|
||||||
|
// mark it as inactive only if enough time has passed and there are no ongoing merges going on...
|
||||||
|
if ((time - status.time) > inactiveTime.millis() && indexShard.mergeStats().getCurrent() == 0) {
|
||||||
|
// inactive for this amount of time, mark it
|
||||||
|
activeToInactiveIndexingShards.add(indexShard);
|
||||||
|
status.activeIndexing = false;
|
||||||
|
changes.add(ShardStatusChangeType.BECAME_INACTIVE);
|
||||||
|
logger.debug("marking shard [{}][{}] as inactive (inactive_time[{}]) indexing wise, setting size to [{}]", indexShard.shardId().index().name(), indexShard.shardId().id(), inactiveTime, Engine.INACTIVE_SHARD_INDEXING_BUFFER);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
if (!status.activeIndexing) {
|
||||||
|
status.activeIndexing = true;
|
||||||
|
changes.add(ShardStatusChangeType.BECAME_ACTIVE);
|
||||||
|
logger.debug("marking shard [{}][{}] as active indexing wise", indexShard.shardId().index().name(), indexShard.shardId().id());
|
||||||
|
}
|
||||||
|
status.time = -1;
|
||||||
|
}
|
||||||
|
status.translogId = translog.currentId();
|
||||||
|
status.translogNumberOfOperations = translog.estimatedNumberOfOperations();
|
||||||
|
|
||||||
|
if (status.activeIndexing) {
|
||||||
|
activeShards++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return activeShards;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* purge any existing statuses that are no longer updated
|
||||||
|
*
|
||||||
|
* @return true if any change
|
||||||
|
*/
|
||||||
|
private EnumSet<ShardStatusChangeType> purgeDeletedAndClosedShards() {
|
||||||
|
EnumSet<ShardStatusChangeType> changes = EnumSet.noneOf(ShardStatusChangeType.class);
|
||||||
|
|
||||||
private void calcAndSetShardBuffers(String reason) {
|
Iterator<ShardId> statusShardIdIterator = shardsIndicesStatus.keySet().iterator();
|
||||||
int shardsCount = countActiveShards();
|
while (statusShardIdIterator.hasNext()) {
|
||||||
if (shardsCount == 0) {
|
ShardId statusShardId = statusShardIdIterator.next();
|
||||||
|
IndexService indexService = indicesService.indexService(statusShardId.getIndex());
|
||||||
|
boolean remove = false;
|
||||||
|
try {
|
||||||
|
if (indexService == null) {
|
||||||
|
remove = true;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
IndexShard indexShard = indexService.shard(statusShardId.id());
|
||||||
|
if (indexShard == null) {
|
||||||
|
remove = true;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
remove = !CAN_UPDATE_INDEX_BUFFER_STATES.contains(indexShard.state());
|
||||||
|
|
||||||
|
} finally {
|
||||||
|
if (remove) {
|
||||||
|
changes.add(ShardStatusChangeType.DELETED);
|
||||||
|
statusShardIdIterator.remove();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return changes;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void calcAndSetShardBuffers(int activeShards, String reason) {
|
||||||
|
if (activeShards == 0) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / shardsCount);
|
ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / activeShards);
|
||||||
if (shardIndexingBufferSize.bytes() < minShardIndexBufferSize.bytes()) {
|
if (shardIndexingBufferSize.bytes() < minShardIndexBufferSize.bytes()) {
|
||||||
shardIndexingBufferSize = minShardIndexBufferSize;
|
shardIndexingBufferSize = minShardIndexBufferSize;
|
||||||
}
|
}
|
||||||
|
@ -259,7 +284,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
shardIndexingBufferSize = maxShardIndexBufferSize;
|
shardIndexingBufferSize = maxShardIndexBufferSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
ByteSizeValue shardTranslogBufferSize = new ByteSizeValue(translogBuffer.bytes() / shardsCount);
|
ByteSizeValue shardTranslogBufferSize = new ByteSizeValue(translogBuffer.bytes() / activeShards);
|
||||||
if (shardTranslogBufferSize.bytes() < minShardTranslogBufferSize.bytes()) {
|
if (shardTranslogBufferSize.bytes() < minShardTranslogBufferSize.bytes()) {
|
||||||
shardTranslogBufferSize = minShardTranslogBufferSize;
|
shardTranslogBufferSize = minShardTranslogBufferSize;
|
||||||
}
|
}
|
||||||
|
@ -267,7 +292,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
shardTranslogBufferSize = maxShardTranslogBufferSize;
|
shardTranslogBufferSize = maxShardTranslogBufferSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.debug("recalculating shard indexing buffer (reason={}), total is [{}] with [{}] active shards, each shard set to indexing=[{}], translog=[{}]", reason, indexingBuffer, shardsCount, shardIndexingBufferSize, shardTranslogBufferSize);
|
logger.debug("recalculating shard indexing buffer (reason={}), total is [{}] with [{}] active shards, each shard set to indexing=[{}], translog=[{}]", reason, indexingBuffer, activeShards, shardIndexingBufferSize, shardTranslogBufferSize);
|
||||||
for (IndexService indexService : indicesService) {
|
for (IndexService indexService : indicesService) {
|
||||||
for (IndexShard indexShard : indexService) {
|
for (IndexShard indexShard : indexService) {
|
||||||
IndexShardState state = indexShard.state();
|
IndexShardState state = indexShard.state();
|
||||||
|
@ -276,7 +301,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
|
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
|
||||||
if (status == null || !status.inactiveIndexing) {
|
if (status == null || status.activeIndexing) {
|
||||||
try {
|
try {
|
||||||
((InternalIndexShard) indexShard).engine().updateIndexingBufferSize(shardIndexingBufferSize);
|
((InternalIndexShard) indexShard).engine().updateIndexingBufferSize(shardIndexingBufferSize);
|
||||||
((InternalIndexShard) indexShard).translog().updateBuffer(shardTranslogBufferSize);
|
((InternalIndexShard) indexShard).translog().updateBuffer(shardTranslogBufferSize);
|
||||||
|
@ -293,24 +318,17 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private int countActiveShards() {
|
private static enum ShardStatusChangeType {
|
||||||
int shardsCount = 0;
|
ADDED, DELETED, BECAME_ACTIVE, BECAME_INACTIVE
|
||||||
for (IndexService indexService : indicesService) {
|
|
||||||
for (IndexShard indexShard : indexService) {
|
|
||||||
ShardIndexingStatus status = shardsIndicesStatus.get(indexShard.shardId());
|
|
||||||
if (status == null || !status.inactiveIndexing) {
|
|
||||||
shardsCount++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return shardsCount;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static class ShardIndexingStatus {
|
static class ShardIndexingStatus {
|
||||||
long translogId = -1;
|
long translogId = -1;
|
||||||
int translogNumberOfOperations = -1;
|
int translogNumberOfOperations = -1;
|
||||||
boolean inactiveIndexing = false;
|
boolean activeIndexing = true;
|
||||||
long time = -1; // contains the first time we saw this shard with no operations done on it
|
long time = -1; // contains the first time we saw this shard with no operations done on it
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,80 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.indices.memory;
|
|
||||||
|
|
||||||
import com.google.common.base.Predicate;
|
|
||||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
|
||||||
import org.elasticsearch.index.engine.internal.InternalEngine;
|
|
||||||
import org.elasticsearch.index.shard.service.InternalIndexShard;
|
|
||||||
import org.elasticsearch.indices.IndicesService;
|
|
||||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
|
||||||
import org.junit.Test;
|
|
||||||
|
|
||||||
|
|
||||||
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
|
|
||||||
public class IndexMemoryControllerTests extends ElasticsearchIntegrationTest {
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testIndexBufferSizeUpdateAfterShardCreation() throws InterruptedException {
|
|
||||||
|
|
||||||
internalCluster().startNode(ImmutableSettings.builder()
|
|
||||||
.put("http.enabled", "false")
|
|
||||||
.put("discovery.type", "local")
|
|
||||||
.put("indices.memory.interval", "1s")
|
|
||||||
);
|
|
||||||
|
|
||||||
client().admin().indices().prepareCreate("test1")
|
|
||||||
.setSettings(ImmutableSettings.builder()
|
|
||||||
.put("number_of_shards", 1)
|
|
||||||
.put("number_of_replicas", 0)
|
|
||||||
).get();
|
|
||||||
|
|
||||||
ensureGreen();
|
|
||||||
|
|
||||||
final InternalIndexShard shard1 = (InternalIndexShard) internalCluster().getInstance(IndicesService.class).indexService("test1").shard(0);
|
|
||||||
|
|
||||||
client().admin().indices().prepareCreate("test2")
|
|
||||||
.setSettings(ImmutableSettings.builder()
|
|
||||||
.put("number_of_shards", 1)
|
|
||||||
.put("number_of_replicas", 0)
|
|
||||||
).get();
|
|
||||||
|
|
||||||
ensureGreen();
|
|
||||||
|
|
||||||
final InternalIndexShard shard2 = (InternalIndexShard) internalCluster().getInstance(IndicesService.class).indexService("test2").shard(0);
|
|
||||||
final long expectedShardSize = internalCluster().getInstance(IndexingMemoryController.class).indexingBufferSize().bytes() / 2;
|
|
||||||
|
|
||||||
boolean success = awaitBusy(new Predicate<Object>() {
|
|
||||||
@Override
|
|
||||||
public boolean apply(Object input) {
|
|
||||||
return ((InternalEngine) shard1.engine()).indexingBufferSize().bytes() <= expectedShardSize &&
|
|
||||||
((InternalEngine) shard2.engine()).indexingBufferSize().bytes() <= expectedShardSize;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!success) {
|
|
||||||
fail("failed to update shard indexing buffer size. expected [" + expectedShardSize + "] shard1 [" +
|
|
||||||
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "] shard2 [" +
|
|
||||||
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "]"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,155 @@
|
||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.indices.memory;
|
||||||
|
|
||||||
|
import com.google.common.base.Predicate;
|
||||||
|
import org.elasticsearch.cluster.ClusterName;
|
||||||
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
|
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||||
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||||
|
import org.elasticsearch.index.engine.Engine;
|
||||||
|
import org.elasticsearch.index.engine.internal.InternalEngine;
|
||||||
|
import org.elasticsearch.index.shard.service.InternalIndexShard;
|
||||||
|
import org.elasticsearch.indices.IndicesService;
|
||||||
|
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
|
||||||
|
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
|
||||||
|
public class IndexingMemoryControllerTests extends ElasticsearchIntegrationTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testIndexBufferSizeUpdateAfterCreationRemoval() throws InterruptedException {
|
||||||
|
|
||||||
|
createNode(ImmutableSettings.EMPTY);
|
||||||
|
|
||||||
|
prepareCreate("test1").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
|
||||||
|
|
||||||
|
ensureGreen();
|
||||||
|
|
||||||
|
final InternalIndexShard shard1 = (InternalIndexShard) internalCluster().getInstance(IndicesService.class).indexService("test1").shard(0);
|
||||||
|
|
||||||
|
prepareCreate("test2").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
|
||||||
|
|
||||||
|
ensureGreen();
|
||||||
|
|
||||||
|
final InternalIndexShard shard2 = (InternalIndexShard) internalCluster().getInstance(IndicesService.class).indexService("test2").shard(0);
|
||||||
|
final long expected1ShardSize = internalCluster().getInstance(IndexingMemoryController.class).indexingBufferSize().bytes();
|
||||||
|
final long expected2ShardsSize = expected1ShardSize / 2;
|
||||||
|
|
||||||
|
boolean success = awaitBusy(new Predicate<Object>() {
|
||||||
|
@Override
|
||||||
|
public boolean apply(Object input) {
|
||||||
|
return ((InternalEngine) shard1.engine()).indexingBufferSize().bytes() <= expected2ShardsSize &&
|
||||||
|
((InternalEngine) shard2.engine()).indexingBufferSize().bytes() <= expected2ShardsSize;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!success) {
|
||||||
|
fail("failed to update shard indexing buffer size. expected [" + expected2ShardsSize + "] shard1 [" +
|
||||||
|
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "] shard2 [" +
|
||||||
|
((InternalEngine) shard2.engine()).indexingBufferSize().bytes() + "]"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
client().admin().indices().prepareDelete("test2").get();
|
||||||
|
success = awaitBusy(new Predicate<Object>() {
|
||||||
|
@Override
|
||||||
|
public boolean apply(Object input) {
|
||||||
|
return ((InternalEngine) shard1.engine()).indexingBufferSize().bytes() >= expected1ShardSize;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!success) {
|
||||||
|
fail("failed to update shard indexing buffer size after deleting shards. expected [" + expected1ShardSize + "] got [" +
|
||||||
|
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "]"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testIndexBufferSizeUpdateInactiveShard() throws InterruptedException {
|
||||||
|
|
||||||
|
createNode(ImmutableSettings.builder().put("indices.memory.shard_inactive_time", "100ms").build());
|
||||||
|
|
||||||
|
prepareCreate("test1").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
|
||||||
|
|
||||||
|
ensureGreen();
|
||||||
|
|
||||||
|
final InternalIndexShard shard1 = (InternalIndexShard) internalCluster().getInstance(IndicesService.class).indexService("test1").shard(0);
|
||||||
|
boolean success = awaitBusy(new Predicate<Object>() {
|
||||||
|
@Override
|
||||||
|
public boolean apply(Object input) {
|
||||||
|
return ((InternalEngine) shard1.engine()).indexingBufferSize().bytes() == Engine.INACTIVE_SHARD_INDEXING_BUFFER.bytes();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if (!success) {
|
||||||
|
fail("failed to update shard indexing buffer size due to inactive state. expected [" + Engine.INACTIVE_SHARD_INDEXING_BUFFER + "] got [" +
|
||||||
|
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "]"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
index("test1", "type", "1", "f", 1);
|
||||||
|
|
||||||
|
success = awaitBusy(new Predicate<Object>() {
|
||||||
|
@Override
|
||||||
|
public boolean apply(Object input) {
|
||||||
|
return ((InternalEngine) shard1.engine()).indexingBufferSize().bytes() > Engine.INACTIVE_SHARD_INDEXING_BUFFER.bytes();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if (!success) {
|
||||||
|
fail("failed to update shard indexing buffer size due to inactive state. expected something larger then [" + Engine.INACTIVE_SHARD_INDEXING_BUFFER + "] got [" +
|
||||||
|
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "]"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
flush(); // clean translogs
|
||||||
|
|
||||||
|
success = awaitBusy(new Predicate<Object>() {
|
||||||
|
@Override
|
||||||
|
public boolean apply(Object input) {
|
||||||
|
return ((InternalEngine) shard1.engine()).indexingBufferSize().bytes() == Engine.INACTIVE_SHARD_INDEXING_BUFFER.bytes();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if (!success) {
|
||||||
|
fail("failed to update shard indexing buffer size due to inactive state. expected [" + Engine.INACTIVE_SHARD_INDEXING_BUFFER + "] got [" +
|
||||||
|
((InternalEngine) shard1.engine()).indexingBufferSize().bytes() + "]"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void createNode(Settings settings) {
|
||||||
|
internalCluster().startNode(ImmutableSettings.builder()
|
||||||
|
.put(ClusterName.SETTING, "IndexingMemoryControllerTests")
|
||||||
|
.put("node.name", "IndexingMemoryControllerTests")
|
||||||
|
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||||
|
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||||
|
.put(EsExecutors.PROCESSORS, 1) // limit the number of threads created
|
||||||
|
.put("http.enabled", false)
|
||||||
|
.put("index.store.type", "ram")
|
||||||
|
.put("config.ignore_system_properties", true) // make sure we get what we set :)
|
||||||
|
.put("gateway.type", "none")
|
||||||
|
.put("indices.memory.interval", "100ms")
|
||||||
|
.put(settings)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
Loading…
Reference in New Issue