Changed expectedOperationsPerItem from an AtomicInteger array to an AtomicReferenceArray<AtomicInteger>.

Made node stopping / starting less aggressive in RecoveryPercolatorTests.
Martijn van Groningen 2013-08-30 16:10:53 +02:00
parent c9c850167f
commit be00437c65
2 changed files with 37 additions and 31 deletions
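
The core of the first change: elements of a plain AtomicInteger[] are installed with ordinary, non-volatile writes, so a response handler running on another thread can observe a stale null in a slot even after the setup loop has filled it. AtomicReferenceArray gives each slot volatile read/write semantics. A minimal, hypothetical sketch of the difference (not code from this commit):

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Hypothetical sketch of the visibility hazard the commit addresses.
    public class SlotVisibilitySketch {

        public static void main(String[] args) {
            // Plain array: this element store is an ordinary write; a thread
            // racing with it may still read null from slots[0].
            AtomicInteger[] slots = new AtomicInteger[4];
            slots[0] = new AtomicInteger(1);

            // AtomicReferenceArray: set(i, v) is a volatile write and get(i) a
            // volatile read, so a reader that observes the element also observes
            // every write made before set(i, v) - no stale null after publication.
            AtomicReferenceArray<AtomicInteger> safeSlots =
                    new AtomicReferenceArray<AtomicInteger>(4);
            safeSlots.set(0, new AtomicInteger(1));
            AtomicInteger counter = safeSlots.get(0);
            if (counter.decrementAndGet() == 0) {
                System.out.println("last expected operation for slot 0 completed");
            }
        }
    }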

TransportMultiPercolateAction.java

@@ -137,15 +137,13 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
     private void multiPercolate(MultiPercolateRequest multiPercolateRequest, final AtomicReferenceArray<Object> percolateRequests,
                                 final ActionListener<MultiPercolateResponse> listener, ClusterState clusterState) {
-        final AtomicInteger[] expectedOperationsPerItem = new AtomicInteger[percolateRequests.length()];
+        final AtomicReferenceArray<AtomicInteger> expectedOperationsPerItem = new AtomicReferenceArray<AtomicInteger>(percolateRequests.length());
         final AtomicReferenceArray<AtomicReferenceArray> responsesByItemAndShard = new AtomicReferenceArray<AtomicReferenceArray>(multiPercolateRequest.requests().size());
         final AtomicArray<Object> reducedResponses = new AtomicArray<Object>(percolateRequests.length());
-        // Keep track what slots belong to what shard, in case a request to a shard fails on all copies
-        final ConcurrentMap<ShardId, AtomicIntegerArray> shardToSlots = ConcurrentCollections.newConcurrentMap();
         // Resolving concrete indices and routing and grouping the requests by shard
         Map<ShardId, TransportShardMultiPercolateAction.Request> requestsByShard = new HashMap<ShardId, TransportShardMultiPercolateAction.Request>();
         // Keep track what slots belong to what shard, in case a request to a shard fails on all copies
         Map<ShardId, TIntArrayList> shardToSlotsBuilder = new HashMap<ShardId, TIntArrayList>();
         int expectedResults = 0;
         for (int i = 0; i < percolateRequests.length(); i++) {
@@ -161,7 +159,7 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
                 );
                 responsesByItemAndShard.set(i, new AtomicReferenceArray(shards.size()));
-                expectedOperationsPerItem[i] = new AtomicInteger(shards.size());
+                expectedOperationsPerItem.set(i, new AtomicInteger(shards.size()));
                 for (ShardIterator shard : shards) {
                     ShardId shardId = shard.shardId();
                     TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId);
@@ -180,7 +178,7 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
             } else if (element instanceof Throwable) {
                 reducedResponses.set(i, element);
                 responsesByItemAndShard.set(i, new AtomicReferenceArray(0));
-                expectedOperationsPerItem[i] = new AtomicInteger(0);
+                expectedOperationsPerItem.set(i, new AtomicInteger(0));
             }
         }
@@ -189,6 +187,8 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
             return;
         }
+        // Move slot to shard tracking from normal map to concurrent safe map
+        final ConcurrentMap<ShardId, AtomicIntegerArray> shardToSlots = ConcurrentCollections.newConcurrentMap();
         for (Map.Entry<ShardId, TIntArrayList> entry : shardToSlotsBuilder.entrySet()) {
             shardToSlots.put(entry.getKey(), new AtomicIntegerArray(entry.getValue().toArray()));
         }
@@ -215,8 +215,8 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
                     shardResults.set(shardId.id(), item.response());
                 }
-                assert expectedOperationsPerItem[item.slot()].get() >= 1 : "slot[" + item.slot() + "] can't be lower than one";
-                if (expectedOperationsPerItem[item.slot()].decrementAndGet() == 0) {
+                assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower than one";
+                if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) {
                     // Failure won't bubble up, since we fail the whole request now via the catch clause below,
                     // so expectedOperationsPerItem will not be decremented twice.
                     reduce(item.slot(), percolateRequests, expectedOperations, reducedResponses, listener, responsesByItemAndShard);
@@ -242,8 +242,8 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
                 }
                 shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, e));
-                assert expectedOperationsPerItem[slot].get() >= 1 : "slot[" + slot + "] can't be lower than one. Caused by: " + e.getMessage();
-                if (expectedOperationsPerItem[slot].decrementAndGet() == 0) {
+                assert expectedOperationsPerItem.get(slot).get() >= 1 : "slot[" + slot + "] can't be lower than one. Caused by: " + e.getMessage();
+                if (expectedOperationsPerItem.get(slot).decrementAndGet() == 0) {
                     reduce(slot, percolateRequests, expectedOperations, reducedResponses, listener, responsesByItemAndShard);
                 }
             }
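
The relocated declaration above is a build-then-publish idiom: shardToSlotsBuilder is a plain HashMap that is mutated only during the single-threaded request-grouping phase, and its contents are copied once into a concurrent map of fixed-size AtomicIntegerArrays just before shard-level requests go out and responses may arrive on other threads. A simplified sketch of the pattern using plain JDK types (a hypothetical String key stands in for ShardId, and List<Integer> stands in for TIntArrayList):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.AtomicIntegerArray;

    // Hypothetical sketch of the build-then-publish pattern, not commit code.
    public class BuildThenPublishSketch {

        public static void main(String[] args) {
            // Phase 1: single-threaded assembly with ordinary collections.
            Map<String, List<Integer>> builder = new HashMap<String, List<Integer>>();
            builder.put("shard-0", Arrays.asList(0, 2));
            builder.put("shard-1", Arrays.asList(1, 3));

            // Phase 2: one-shot copy into a concurrent map of fixed-size atomic
            // arrays; only this map is shared with the response-handling threads.
            ConcurrentMap<String, AtomicIntegerArray> shardToSlots =
                    new ConcurrentHashMap<String, AtomicIntegerArray>();
            for (Map.Entry<String, List<Integer>> entry : builder.entrySet()) {
                List<Integer> slots = entry.getValue();
                int[] raw = new int[slots.size()];
                for (int i = 0; i < raw.length; i++) {
                    raw[i] = slots.get(i);
                }
                shardToSlots.put(entry.getKey(), new AtomicIntegerArray(raw));
            }
            System.out.println("published " + shardToSlots.size() + " shard entries");
        }
    }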

RecoveryPercolatorTests.java

@@ -31,6 +31,7 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.gateway.Gateway;
@@ -100,8 +101,8 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         PercolateResponse percolate = client.preparePercolate()
                 .setIndices("test").setDocumentType("type1")
                 .setSource(jsonBuilder().startObject().startObject("doc")
-                .field("field1", "value1")
-                .endObject().endObject())
+                        .field("field1", "value1")
+                        .endObject().endObject())
                 .execute().actionGet();
         assertThat(percolate.getMatches(), arrayWithSize(1));
@@ -120,8 +121,8 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         percolate = client.preparePercolate()
                 .setIndices("test").setDocumentType("type1")
                 .setSource(jsonBuilder().startObject().startObject("doc")
-                .field("field1", "value1")
-                .endObject().endObject())
+                        .field("field1", "value1")
+                        .endObject().endObject())
                 .execute().actionGet();
         assertThat(percolate.getMatches(), arrayWithSize(1));
     }
@@ -153,8 +154,8 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         PercolateResponse percolate = client.preparePercolate()
                 .setIndices("test").setDocumentType("type1")
                 .setSource(jsonBuilder().startObject().startObject("doc")
-                .field("field1", "value1")
-                .endObject().endObject())
+                        .field("field1", "value1")
+                        .endObject().endObject())
                 .execute().actionGet();
         assertThat(percolate.getMatches(), arrayWithSize(1));
@@ -184,8 +185,8 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         percolate = client.preparePercolate()
                 .setIndices("test").setDocumentType("type1")
                 .setSource(jsonBuilder().startObject().startObject("doc")
-                .field("field1", "value1")
-                .endObject().endObject())
+                        .field("field1", "value1")
+                        .endObject().endObject())
                 .execute().actionGet();
         assertThat(percolate.getMatches(), emptyArray());
@@ -203,8 +204,8 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         percolate = client.preparePercolate()
                 .setIndices("test").setDocumentType("type1")
                 .setSource(jsonBuilder().startObject().startObject("doc")
-                .field("field1", "value1")
-                .endObject().endObject())
+                        .field("field1", "value1")
+                        .endObject().endObject())
                 .execute().actionGet();
         assertThat(percolate.getMatches(), arrayWithSize(1));
     }
@@ -370,7 +371,7 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         for (MultiPercolateResponse.Item item : response) {
             assertThat(item.isFailure(), equalTo(false));
             assertNoFailures(item.getResponse());
-            assertThat(item.getResponse().getSuccessfulShards(), equalTo(2));
+            assertThat(item.getResponse().getSuccessfulShards(), equalTo(item.getResponse().getTotalShards()));
             assertThat(item.getResponse().getCount(), equalTo((long) numQueries));
             assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
         }
@@ -390,7 +391,7 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
                     .execute().actionGet();
         }
         assertNoFailures(response);
-        assertThat(response.getSuccessfulShards(), equalTo(2));
+        assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
         assertThat(response.getCount(), equalTo((long) numQueries));
         assertThat(response.getMatches().length, equalTo(numQueries));
     }
@@ -407,33 +408,38 @@ public class RecoveryPercolatorTests extends AbstractNodesTests {
         new Thread(r).start();
         try {
             // 1 index, 2 primaries, 2 replicas per primary
             for (int i = 0; i < 4; i++) {
                 closeNode("node3");
-                client.admin().cluster().prepareHealth()
+                client.admin().cluster().prepareHealth("test")
                         .setWaitForEvents(Priority.LANGUID)
+                        .setTimeout(TimeValue.timeValueMinutes(2))
                         .setWaitForYellowStatus()
                         .setWaitForNodes("2")
+                        .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
                         .execute().actionGet();
                 assertThat(error.get(), nullValue());
                 closeNode("node2");
-                client.admin().cluster().prepareHealth()
+                client.admin().cluster().prepareHealth("test")
                         .setWaitForEvents(Priority.LANGUID)
+                        .setTimeout(TimeValue.timeValueMinutes(2))
                         .setWaitForYellowStatus()
                         .setWaitForNodes("1")
+                        .setWaitForActiveShards(2) // 1 node, so 2 shards (2 primaries, 0 replicas)
                         .execute().actionGet();
                 assertThat(error.get(), nullValue());
                 startNode("node3");
-                client.admin().cluster().prepareHealth()
+                client.admin().cluster().prepareHealth("test")
                         .setWaitForEvents(Priority.LANGUID)
+                        .setTimeout(TimeValue.timeValueMinutes(2))
                         .setWaitForYellowStatus()
                         .setWaitForNodes("2")
+                        .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
                         .execute().actionGet();
                 assertThat(error.get(), nullValue());
                 startNode("node2");
-                client.admin().cluster().prepareHealth()
+                client.admin().cluster().prepareHealth("test")
                         .setWaitForEvents(Priority.LANGUID)
-                        .setWaitForYellowStatus()
-                        .setWaitForNodes("3")
+                        .setTimeout(TimeValue.timeValueMinutes(2))
+                        .setWaitForGreenStatus() // We confirm the shard settings, so green instead of yellow
+                        .setWaitForActiveShards(6) // 3 nodes, so 6 shards (2 primaries, 4 replicas)
                         .execute().actionGet();
                 assertThat(error.get(), nullValue());
             }
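
The four wait blocks in the loop above repeat one pattern: after each node stop or start, block with a bounded timeout until the cluster reaches the expected topology (node count, active shard count, yellow or green), then verify that the background percolation thread has not recorded an error. As a sketch, that pattern could be factored into a helper on the test class; the method name and parameters below are hypothetical, while the builder calls are exactly the ones used in the diff:

    // Hypothetical helper for the test class, not part of the commit.
    // Client, Priority and TimeValue are already imported in the test file;
    // this also needs org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder.
    private void awaitTestIndexState(Client client, String nodes, int activeShards, boolean green) {
        ClusterHealthRequestBuilder health = client.admin().cluster().prepareHealth("test")
                .setWaitForEvents(Priority.LANGUID)
                .setTimeout(TimeValue.timeValueMinutes(2))
                .setWaitForNodes(nodes)
                .setWaitForActiveShards(activeShards);
        if (green) {
            health.setWaitForGreenStatus();  // every expected replica allocated
        } else {
            health.setWaitForYellowStatus(); // primaries active, replicas may still be missing
        }
        health.execute().actionGet();
    }

Each step in the loop would then read, for example: closeNode("node3"); awaitTestIndexState(client, "2", 4, false); assertThat(error.get(), nullValue());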