Simplify SnapshotResiliencyTests (#46961) (#47108)

Simplify `SnapshotResiliencyTests` to more closely
match the structure of `AbstractCoordinatorTestCase` and allow for
future drying up between the two classes:

* Make the test cluster nodes a nested class in the test cluster itself
* Remove the needless custom network disruption implementation and
  simply track disconnected node ids like `AbstractCoordinatorTestCase`
  does
This commit is contained in:
Armin Braun 2019-09-25 14:53:11 +02:00 committed by GitHub
parent 83365e94ba
commit c4a166fc9a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 353 additions and 379 deletions

View File

@ -165,7 +165,6 @@ import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.snapshots.mockstore.MockEventuallyConsistentRepository;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.disruption.DisruptableMockTransport;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportInterceptor;
@ -240,7 +239,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
(BlobStoreRepository) testClusterNodes.randomMasterNodeSafe().repositoriesService.repository("repo"),
Runnable::run);
} finally {
testClusterNodes.nodes.values().forEach(TestClusterNode::stop);
testClusterNodes.nodes.values().forEach(TestClusterNodes.TestClusterNode::stop);
}
}
@ -253,7 +252,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
final int shards = randomIntBetween(1, 10);
final int documents = randomIntBetween(0, 100);
final TestClusterNode masterNode =
final TestClusterNodes.TestClusterNode masterNode =
testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
final StepListener<CreateSnapshotResponse> createSnapshotResponseListener = new StepListener<>();
@ -326,7 +325,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);
TestClusterNode masterNode =
TestClusterNodes.TestClusterNode masterNode =
testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
@ -363,7 +362,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
clearDisruptionsAndAwaitSync();
final TestClusterNode randomMaster = testClusterNodes.randomMasterNode()
final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode()
.orElseThrow(() -> new AssertionError("expected to find at least one active master node"));
SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state().custom(SnapshotsInProgress.TYPE);
assertThat(finalSnapshotsInProgress.entries(), empty());
@ -380,7 +379,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
final String index = "test";
final int shards = randomIntBetween(1, 10);
TestClusterNode masterNode =
TestClusterNodes.TestClusterNode masterNode =
testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
@ -431,7 +430,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
final int shards = randomIntBetween(1, 10);
final TestClusterNode masterNode =
final TestClusterNodes.TestClusterNode masterNode =
testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
final AtomicBoolean createdSnapshot = new AtomicBoolean();
final AdminClient masterAdminClient = masterNode.client.admin();
@ -443,8 +442,8 @@ public class SnapshotResiliencyTests extends ESTestCase {
continueOrDie(clusterStateResponseStepListener, clusterStateResponse -> {
final ShardRouting shardToRelocate = clusterStateResponse.getState().routingTable().allShards(index).get(0);
final TestClusterNode currentPrimaryNode = testClusterNodes.nodeById(shardToRelocate.currentNodeId());
final TestClusterNode otherNode = testClusterNodes.randomDataNodeSafe(currentPrimaryNode.node.getName());
final TestClusterNodes.TestClusterNode currentPrimaryNode = testClusterNodes.nodeById(shardToRelocate.currentNodeId());
final TestClusterNodes.TestClusterNode otherNode = testClusterNodes.randomDataNodeSafe(currentPrimaryNode.node.getName());
scheduleNow(() -> testClusterNodes.stopNode(currentPrimaryNode));
scheduleNow(new Runnable() {
@Override
@ -504,7 +503,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
final int shards = randomIntBetween(1, 10);
final int documents = randomIntBetween(2, 100);
TestClusterNode masterNode =
TestClusterNodes.TestClusterNode masterNode =
testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
@ -574,7 +573,8 @@ public class SnapshotResiliencyTests extends ESTestCase {
assertEquals(0, snapshotInfo.failedShards());
}
private StepListener<CreateIndexResponse> createRepoAndIndex(TestClusterNode masterNode, String repoName, String index, int shards) {
private StepListener<CreateIndexResponse> createRepoAndIndex(TestClusterNodes.TestClusterNode masterNode, String repoName,
String index, int shards) {
final AdminClient adminClient = masterNode.client.admin();
final StepListener<AcknowledgedResponse> createRepositoryListener = new StepListener<>();
@ -604,7 +604,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
if (randomBoolean()) {
disconnectRandomDataNode();
} else {
testClusterNodes.randomDataNode().ifPresent(TestClusterNode::restart);
testClusterNodes.randomDataNode().ifPresent(TestClusterNodes.TestClusterNode::restart);
}
}
@ -712,7 +712,10 @@ public class SnapshotResiliencyTests extends ESTestCase {
// LinkedHashMap so we have deterministic ordering when iterating over the map in tests
private final Map<String, TestClusterNode> nodes = new LinkedHashMap<>();
private final DisconnectedNodes disruptedLinks = new DisconnectedNodes();
/**
* Node ids that are disconnected from all other nodes.
*/
private final Set<String> disconnectedNodes = new HashSet<>();
TestClusterNodes(int masterNodes, int dataNodes) {
for (int i = 0; i < masterNodes; ++i) {
@ -751,7 +754,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
private TestClusterNode newNode(String nodeName, DiscoveryNodeRole role) throws IOException {
return new TestClusterNode(
new DiscoveryNode(nodeName, randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(),
Collections.singleton(role), Version.CURRENT), this::getDisruption);
Collections.singleton(role), Version.CURRENT));
}
public TestClusterNode randomMasterNodeSafe() {
@ -790,16 +793,16 @@ public class SnapshotResiliencyTests extends ESTestCase {
}
public void disconnectNode(TestClusterNode node) {
if (disruptedLinks.disconnected.contains(node.node.getName())) {
if (disconnectedNodes.contains(node.node.getId())) {
return;
}
testClusterNodes.nodes.values().forEach(n -> n.transportService.getConnectionManager().disconnectFromNode(node.node));
disruptedLinks.disconnect(node.node.getName());
disconnectedNodes.add(node.node.getId());
}
public void clearNetworkDisruptions() {
final Set<String> disconnectedNodes = new HashSet<>(disruptedLinks.disconnected);
disruptedLinks.clear();
final Set<String> disconnectedNodes = new HashSet<>(this.disconnectedNodes);
this.disconnectedNodes.clear();
disconnectedNodes.forEach(nodeName -> {
if (testClusterNodes.nodes.containsKey(nodeName)) {
final DiscoveryNode node = testClusterNodes.nodes.get(nodeName).node;
@ -808,10 +811,6 @@ public class SnapshotResiliencyTests extends ESTestCase {
});
}
private NetworkDisruption.DisruptedLinks getDisruption() {
return disruptedLinks;
}
/**
* Builds a {@link DiscoveryNodes} instance that holds the nodes in this test cluster.
* @return DiscoveryNodes
@ -833,209 +832,185 @@ public class SnapshotResiliencyTests extends ESTestCase {
assertTrue(master.node.isMasterNode());
return master;
}
}
private final class TestClusterNode {
private final class TestClusterNode {
private final Logger logger = LogManager.getLogger(TestClusterNode.class);
private final Logger logger = LogManager.getLogger(TestClusterNode.class);
private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Stream.concat(
ClusterModule.getNamedWriteables().stream(), NetworkModule.getNamedWriteables().stream()).collect(Collectors.toList()));
private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Stream.concat(
ClusterModule.getNamedWriteables().stream(), NetworkModule.getNamedWriteables().stream()).collect(Collectors.toList()));
private final TransportService transportService;
private final TransportService transportService;
private final ClusterService clusterService;
private final ClusterService clusterService;
private final RepositoriesService repositoriesService;
private final RepositoriesService repositoriesService;
private final SnapshotsService snapshotsService;
private final SnapshotsService snapshotsService;
private final SnapshotShardsService snapshotShardsService;
private final SnapshotShardsService snapshotShardsService;
private final IndicesService indicesService;
private final IndicesService indicesService;
private final IndicesClusterStateService indicesClusterStateService;
private final IndicesClusterStateService indicesClusterStateService;
private final DiscoveryNode node;
private final DiscoveryNode node;
private final MasterService masterService;
private final MasterService masterService;
private final AllocationService allocationService;
private final AllocationService allocationService;
private final NodeClient client;
private final NodeClient client;
private final NodeEnvironment nodeEnv;
private final NodeEnvironment nodeEnv;
private final DisruptableMockTransport mockTransport;
private final DisruptableMockTransport mockTransport;
private final ThreadPool threadPool;
private final ThreadPool threadPool;
private final Supplier<NetworkDisruption.DisruptedLinks> disruption;
private Coordinator coordinator;
private Coordinator coordinator;
TestClusterNode(DiscoveryNode node, Supplier<NetworkDisruption.DisruptedLinks> disruption) throws IOException {
this.disruption = disruption;
this.node = node;
final Environment environment = createEnvironment(node.getName());
masterService = new FakeThreadPoolMasterService(node.getName(), "test", deterministicTaskQueue::scheduleNow);
final Settings settings = environment.settings();
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
threadPool = deterministicTaskQueue.getThreadPool();
clusterService = new ClusterService(settings, clusterSettings, masterService,
new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) {
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue);
}
@Override
protected void connectToNodesAndWait(ClusterState newClusterState) {
// don't do anything, and don't block
}
});
mockTransport = new DisruptableMockTransport(node, logger) {
@Override
protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) {
return disruption.get().disrupt(node.getName(), destination.getName())
? ConnectionStatus.DISCONNECTED : ConnectionStatus.CONNECTED;
}
@Override
protected Optional<DisruptableMockTransport> getDisruptableMockTransport(TransportAddress address) {
return testClusterNodes.nodes.values().stream().map(cn -> cn.mockTransport)
.filter(transport -> transport.getLocalNode().getAddress().equals(address))
.findAny();
}
@Override
protected void execute(Runnable runnable) {
scheduleNow(CoordinatorTests.onNodeLog(getLocalNode(), runnable));
}
@Override
protected NamedWriteableRegistry writeableRegistry() {
return namedWriteableRegistry;
}
};
transportService = mockTransport.createTransportService(
settings, deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)),
new TransportInterceptor() {
@Override
public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, String executor,
boolean forceExecution, TransportRequestHandler<T> actualHandler) {
// TODO: Remove this hack once recoveries are async and can be used in these tests
if (action.startsWith("internal:index/shard/recovery")) {
return (request, channel, task) -> scheduleSoon(
new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
channel.sendResponse(new TransportException(new IOException("failed to recover shard")));
}
@Override
public void onFailure(final Exception e) {
throw new AssertionError(e);
}
});
} else {
return actualHandler;
TestClusterNode(DiscoveryNode node) throws IOException {
this.node = node;
final Environment environment = createEnvironment(node.getName());
masterService = new FakeThreadPoolMasterService(node.getName(), "test", deterministicTaskQueue::scheduleNow);
final Settings settings = environment.settings();
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
threadPool = deterministicTaskQueue.getThreadPool();
clusterService = new ClusterService(settings, clusterSettings, masterService,
new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) {
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue);
}
@Override
protected void connectToNodesAndWait(ClusterState newClusterState) {
// don't do anything, and don't block
}
});
mockTransport = new DisruptableMockTransport(node, logger) {
@Override
protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) {
if (node.equals(destination)) {
return ConnectionStatus.CONNECTED;
}
// Check if both nodes are still part of the cluster
if (nodes.containsKey(node.getName()) == false || nodes.containsKey(destination.getName()) == false) {
return ConnectionStatus.DISCONNECTED;
}
return disconnectedNodes.contains(node.getId()) || disconnectedNodes.contains(destination.getId())
? ConnectionStatus.DISCONNECTED : ConnectionStatus.CONNECTED;
}
},
a -> node, null, emptySet()
);
final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
repositoriesService = new RepositoriesService(
settings, clusterService, transportService,
Collections.singletonMap(FsRepository.TYPE, getRepoFactory(environment)), emptyMap(), threadPool
);
snapshotsService =
new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool);
nodeEnv = new NodeEnvironment(settings, environment);
final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList());
final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap());
client = new NodeClient(settings, threadPool);
allocationService = ESAllocationTestCase.createAllocationService(settings);
final IndexScopedSettings indexScopedSettings =
new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
final BigArrays bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test");
final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
indicesService = new IndicesService(
settings,
mock(PluginsService.class),
nodeEnv,
namedXContentRegistry,
new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(),
emptyMap(), emptyMap(), emptyMap(), emptyMap()),
indexNameExpressionResolver,
mapperRegistry,
namedWriteableRegistry,
threadPool,
indexScopedSettings,
new NoneCircuitBreakerService(),
bigArrays,
scriptService,
clusterService,
client,
new MetaStateService(nodeEnv, namedXContentRegistry),
Collections.emptyList(),
emptyMap()
);
final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
final ActionFilters actionFilters = new ActionFilters(emptySet());
snapshotShardsService = new SnapshotShardsService(
settings, clusterService, repositoriesService, threadPool,
transportService, indicesService, actionFilters, indexNameExpressionResolver);
final ShardStateAction shardStateAction = new ShardStateAction(
clusterService, transportService, allocationService,
new BatchedRerouteService(clusterService, allocationService::reroute),
threadPool
);
final MetaDataMappingService metaDataMappingService = new MetaDataMappingService(clusterService, indicesService);
indicesClusterStateService = new IndicesClusterStateService(
settings,
indicesService,
clusterService,
threadPool,
new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService),
shardStateAction,
new NodeMappingRefreshAction(transportService, metaDataMappingService),
repositoriesService,
mock(SearchService.class),
new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver),
new PeerRecoverySourceService(transportService, indicesService, recoverySettings),
snapshotShardsService,
new PrimaryReplicaSyncer(
transportService,
new TransportResyncReplicationAction(
settings,
@Override
protected Optional<DisruptableMockTransport> getDisruptableMockTransport(TransportAddress address) {
return nodes.values().stream().map(cn -> cn.mockTransport)
.filter(transport -> transport.getLocalNode().getAddress().equals(address))
.findAny();
}
@Override
protected void execute(Runnable runnable) {
scheduleNow(CoordinatorTests.onNodeLog(getLocalNode(), runnable));
}
@Override
protected NamedWriteableRegistry writeableRegistry() {
return namedWriteableRegistry;
}
};
transportService = mockTransport.createTransportService(
settings, deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)),
new TransportInterceptor() {
@Override
public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, String executor,
boolean forceExecution, TransportRequestHandler<T> actualHandler) {
// TODO: Remove this hack once recoveries are async and can be used in these tests
if (action.startsWith("internal:index/shard/recovery")) {
return (request, channel, task) -> scheduleSoon(
new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
channel.sendResponse(new TransportException(new IOException("failed to recover shard")));
}
@Override
public void onFailure(final Exception e) {
throw new AssertionError(e);
}
});
} else {
return actualHandler;
}
}
},
a -> node, null, emptySet()
);
final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
repositoriesService = new RepositoriesService(
settings, clusterService, transportService,
Collections.singletonMap(FsRepository.TYPE, getRepoFactory(environment)), emptyMap(), threadPool
);
snapshotsService =
new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool);
nodeEnv = new NodeEnvironment(settings, environment);
final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList());
final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap());
client = new NodeClient(settings, threadPool);
allocationService = ESAllocationTestCase.createAllocationService(settings);
final IndexScopedSettings indexScopedSettings =
new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
final BigArrays bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test");
final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
indicesService = new IndicesService(
settings,
mock(PluginsService.class),
nodeEnv,
namedXContentRegistry,
new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(),
emptyMap(), emptyMap(), emptyMap(), emptyMap()),
indexNameExpressionResolver,
mapperRegistry,
namedWriteableRegistry,
threadPool,
indexScopedSettings,
new NoneCircuitBreakerService(),
bigArrays,
scriptService,
clusterService,
client,
new MetaStateService(nodeEnv, namedXContentRegistry),
Collections.emptyList(),
emptyMap()
);
final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
final ActionFilters actionFilters = new ActionFilters(emptySet());
snapshotShardsService = new SnapshotShardsService(
settings, clusterService, repositoriesService, threadPool,
transportService, indicesService, actionFilters, indexNameExpressionResolver);
final ShardStateAction shardStateAction = new ShardStateAction(
clusterService, transportService, allocationService,
new BatchedRerouteService(clusterService, allocationService::reroute),
threadPool
);
final MetaDataMappingService metaDataMappingService = new MetaDataMappingService(clusterService, indicesService);
indicesClusterStateService = new IndicesClusterStateService(
settings,
indicesService,
clusterService,
threadPool,
new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService),
shardStateAction,
new NodeMappingRefreshAction(transportService, metaDataMappingService),
repositoriesService,
mock(SearchService.class),
new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver),
new PeerRecoverySourceService(transportService, indicesService, recoverySettings),
snapshotShardsService,
new PrimaryReplicaSyncer(
transportService,
clusterService,
indicesService,
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver)),
new GlobalCheckpointSyncAction(
settings,
transportService,
clusterService,
indicesService,
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver),
new RetentionLeaseSyncAction(
settings,
transportService,
clusterService,
indicesService,
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver),
new RetentionLeaseBackgroundSyncAction(
new TransportResyncReplicationAction(
settings,
transportService,
clusterService,
@ -1043,196 +1018,195 @@ public class SnapshotResiliencyTests extends ESTestCase {
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver));
indexNameExpressionResolver)),
new GlobalCheckpointSyncAction(
settings,
transportService,
clusterService,
indicesService,
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver),
new RetentionLeaseSyncAction(
settings,
transportService,
clusterService,
indicesService,
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver),
new RetentionLeaseBackgroundSyncAction(
settings,
transportService,
clusterService,
indicesService,
threadPool,
shardStateAction,
actionFilters,
indexNameExpressionResolver));
Map<ActionType, TransportAction> actions = new HashMap<>();
final MetaDataCreateIndexService metaDataCreateIndexService = new MetaDataCreateIndexService(settings, clusterService,
indicesService,
allocationService, new AliasValidator(), environment, indexScopedSettings,
threadPool, namedXContentRegistry, false);
actions.put(CreateIndexAction.INSTANCE,
new TransportCreateIndexAction(
transportService, clusterService, threadPool,
metaDataCreateIndexService,
actionFilters, indexNameExpressionResolver
));
final MappingUpdatedAction mappingUpdatedAction = new MappingUpdatedAction(settings, clusterSettings);
mappingUpdatedAction.setClient(client);
final MetaDataCreateIndexService metaDataCreateIndexService = new MetaDataCreateIndexService(settings, clusterService,
indicesService,
allocationService, new AliasValidator(), environment, indexScopedSettings,
threadPool, namedXContentRegistry, false);
actions.put(CreateIndexAction.INSTANCE,
new TransportCreateIndexAction(
transportService, clusterService, threadPool,
metaDataCreateIndexService,
actionFilters, indexNameExpressionResolver
));
final MappingUpdatedAction mappingUpdatedAction = new MappingUpdatedAction(settings, clusterSettings);
mappingUpdatedAction.setClient(client);
final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction(settings, transportService,
clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction, new UpdateHelper(scriptService),
actionFilters, indexNameExpressionResolver);
actions.put(BulkAction.INSTANCE,
new TransportBulkAction(threadPool, transportService, clusterService,
new IngestService(
clusterService, threadPool, environment, scriptService,
new AnalysisModule(environment, Collections.emptyList()).getAnalysisRegistry(),
Collections.emptyList(), client),
actions.put(BulkAction.INSTANCE,
new TransportBulkAction(threadPool, transportService, clusterService,
new IngestService(
clusterService, threadPool, environment, scriptService,
new AnalysisModule(environment, Collections.emptyList()).getAnalysisRegistry(),
Collections.emptyList(), client),
transportShardBulkAction, client, actionFilters, indexNameExpressionResolver,
new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver)
));
final RestoreService restoreService = new RestoreService(
clusterService, repositoriesService, allocationService,
metaDataCreateIndexService,
new MetaDataIndexUpgradeService(
settings, namedXContentRegistry,
mapperRegistry,
indexScopedSettings,
Collections.emptyList()
),
clusterSettings
);
actions.put(PutMappingAction.INSTANCE,
new TransportPutMappingAction(transportService, clusterService, threadPool, metaDataMappingService,
actionFilters, indexNameExpressionResolver, new RequestValidators<>(Collections.emptyList())));
final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService);
final SearchTransportService searchTransportService = new SearchTransportService(transportService,
SearchExecutionStatsCollector.makeWrapper(responseCollectorService));
final SearchService searchService = new SearchService(clusterService, indicesService, threadPool, scriptService,
bigArrays, new FetchPhase(Collections.emptyList()), responseCollectorService);
actions.put(SearchAction.INSTANCE,
new TransportSearchAction(threadPool, transportService, searchService,
searchTransportService, new SearchPhaseController(searchService::createReduceContext), clusterService,
actionFilters, indexNameExpressionResolver));
actions.put(RestoreSnapshotAction.INSTANCE,
new TransportRestoreSnapshotAction(transportService, clusterService, threadPool, restoreService, actionFilters,
indexNameExpressionResolver));
actions.put(DeleteIndexAction.INSTANCE,
new TransportDeleteIndexAction(
transportService, clusterService, threadPool,
new MetaDataDeleteIndexService(settings, clusterService, allocationService), actionFilters,
indexNameExpressionResolver, new DestructiveOperations(settings, clusterSettings)));
actions.put(PutRepositoryAction.INSTANCE,
new TransportPutRepositoryAction(
transportService, clusterService, repositoriesService, threadPool,
actionFilters, indexNameExpressionResolver
));
actions.put(CreateSnapshotAction.INSTANCE,
new TransportCreateSnapshotAction(
transportService, clusterService, threadPool,
snapshotsService, actionFilters, indexNameExpressionResolver
));
actions.put(ClusterRerouteAction.INSTANCE,
new TransportClusterRerouteAction(transportService, clusterService, threadPool, allocationService,
actionFilters, indexNameExpressionResolver));
actions.put(ClusterStateAction.INSTANCE,
new TransportClusterStateAction(transportService, clusterService, threadPool,
actionFilters, indexNameExpressionResolver));
actions.put(IndicesShardStoresAction.INSTANCE,
new TransportIndicesShardStoresAction(
transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,
new TransportNodesListGatewayStartedShards(settings,
new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver)
));
final RestoreService restoreService = new RestoreService(
clusterService, repositoriesService, allocationService,
metaDataCreateIndexService,
new MetaDataIndexUpgradeService(
settings, namedXContentRegistry,
mapperRegistry,
indexScopedSettings,
Collections.emptyList()
),
clusterSettings
);
actions.put(PutMappingAction.INSTANCE,
new TransportPutMappingAction(transportService, clusterService, threadPool, metaDataMappingService,
actionFilters, indexNameExpressionResolver, new RequestValidators<>(Collections.emptyList())));
final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService);
final SearchTransportService searchTransportService = new SearchTransportService(transportService,
SearchExecutionStatsCollector.makeWrapper(responseCollectorService));
final SearchService searchService = new SearchService(clusterService, indicesService, threadPool, scriptService,
bigArrays, new FetchPhase(Collections.emptyList()), responseCollectorService);
actions.put(SearchAction.INSTANCE,
new TransportSearchAction(threadPool, transportService, searchService,
searchTransportService, new SearchPhaseController(searchService::createReduceContext), clusterService,
actionFilters, indexNameExpressionResolver));
actions.put(RestoreSnapshotAction.INSTANCE,
new TransportRestoreSnapshotAction(transportService, clusterService, threadPool, restoreService, actionFilters,
indexNameExpressionResolver));
actions.put(DeleteIndexAction.INSTANCE,
new TransportDeleteIndexAction(
transportService, clusterService, threadPool,
new MetaDataDeleteIndexService(settings, clusterService, allocationService), actionFilters,
indexNameExpressionResolver, new DestructiveOperations(settings, clusterSettings)));
actions.put(PutRepositoryAction.INSTANCE,
new TransportPutRepositoryAction(
transportService, clusterService, repositoriesService, threadPool,
actionFilters, indexNameExpressionResolver
));
actions.put(CreateSnapshotAction.INSTANCE,
new TransportCreateSnapshotAction(
transportService, clusterService, threadPool,
snapshotsService, actionFilters, indexNameExpressionResolver
));
actions.put(ClusterRerouteAction.INSTANCE,
new TransportClusterRerouteAction(transportService, clusterService, threadPool, allocationService,
actionFilters, indexNameExpressionResolver));
actions.put(ClusterStateAction.INSTANCE,
new TransportClusterStateAction(transportService, clusterService, threadPool,
actionFilters, indexNameExpressionResolver));
actions.put(IndicesShardStoresAction.INSTANCE,
new TransportIndicesShardStoresAction(
transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,
new TransportNodesListGatewayStartedShards(settings,
threadPool, clusterService, transportService, actionFilters, nodeEnv, indicesService, namedXContentRegistry))
);
actions.put(DeleteSnapshotAction.INSTANCE,
new TransportDeleteSnapshotAction(
transportService, clusterService, threadPool,
snapshotsService, actionFilters, indexNameExpressionResolver
));
client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService());
}
actions.put(DeleteSnapshotAction.INSTANCE,
new TransportDeleteSnapshotAction(
transportService, clusterService, threadPool,
snapshotsService, actionFilters, indexNameExpressionResolver
));
client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService());
}
private Repository.Factory getRepoFactory(Environment environment) {
// Run half the tests with the eventually consistent repository
if (blobStoreContext == null) {
return metaData -> {
final Repository repository = new FsRepository(metaData, environment, xContentRegistry(), threadPool) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo in the test thread
}
private Repository.Factory getRepoFactory(Environment environment) {
// Run half the tests with the eventually consistent repository
if (blobStoreContext == null) {
return metaData -> {
final Repository repository = new FsRepository(metaData, environment, xContentRegistry(), threadPool) {
@Override
protected void assertSnapshotOrGenericThread() {
// eliminate thread name check as we create repo in the test thread
}
};
repository.start();
return repository;
};
} else {
return metaData -> {
final Repository repository = new MockEventuallyConsistentRepository(
metaData, xContentRegistry(), deterministicTaskQueue.getThreadPool(), blobStoreContext);
repository.start();
return repository;
};
repository.start();
return repository;
};
} else {
return metaData -> {
final Repository repository = new MockEventuallyConsistentRepository(
metaData, xContentRegistry(), deterministicTaskQueue.getThreadPool(), blobStoreContext);
repository.start();
return repository;
};
}
}
// Simulates a full node restart: disconnect this node from the simulated network, stop all
// of its services, remove it from the cluster-node map, then asynchronously recreate a node
// with the same name/id/address/roles and start it from the last applied cluster state.
public void restart() {
testClusterNodes.disconnectNode(this);
// capture the last applied state before stopping so the restarted node resumes from it
final ClusterState oldState = this.clusterService.state();
stop();
testClusterNodes.nodes.remove(node.getName());
scheduleSoon(() -> {
try {
// identical node identity (name/id/address/roles) so the rest of the cluster re-discovers it
final TestClusterNode restartedNode = new TestClusterNode(
new DiscoveryNode(node.getName(), node.getId(), node.getAddress(), emptyMap(),
node.getRoles(), Version.CURRENT), disruption);
testClusterNodes.nodes.put(node.getName(), restartedNode);
restartedNode.start(oldState);
} catch (IOException e) {
// node construction failure is a test bug, not an expected condition
throw new AssertionError(e);
}
});
}
// Stops this node: disconnects it from the simulated network, then closes all node-level
// services. The coordinator can be null when start(...) was never invoked on this node.
public void stop() {
testClusterNodes.disconnectNode(this);
indicesService.close();
clusterService.close();
indicesClusterStateService.close();
if (coordinator != null) {
coordinator.close();
}
nodeEnv.close();
}
// Starts the node's services from the given initial cluster state and kicks off an initial
// join so the node (re-)enters the simulated cluster. NOTE: the start order below is
// load-bearing (transport before coordination; coordinator set as publisher before the
// master service starts) — do not reorder.
public void start(ClusterState initialState) {
transportService.start();
transportService.acceptIncomingRequests();
snapshotsService.start();
snapshotShardsService.start();
// seed the in-memory persisted state from the provided state, filtered down to this node
final CoordinationState.PersistedState persistedState =
new InMemoryPersistedState(initialState.term(), stateForNode(initialState, node));
coordinator = new Coordinator(node.getName(), clusterService.getSettings(),
clusterService.getClusterSettings(), transportService, namedWriteableRegistry,
allocationService, masterService, () -> persistedState,
// seed-hosts resolver: the addresses of all master-eligible nodes currently in the test cluster
hostsResolver -> testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode())
.map(n -> n.node.getAddress()).collect(Collectors.toList()),
clusterService.getClusterApplierService(), Collections.emptyList(), random(),
new BatchedRerouteService(clusterService, allocationService::reroute), ElectionStrategy.DEFAULT_INSTANCE);
masterService.setClusterStatePublisher(coordinator);
coordinator.start();
masterService.start();
clusterService.getClusterApplierService().setNodeConnectionsService(
new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService));
clusterService.getClusterApplierService().start();
indicesService.start();
indicesClusterStateService.start();
coordinator.startInitialJoin();
}
}
private final class DisconnectedNodes extends NetworkDisruption.DisruptedLinks {
/**
* Node names that are disconnected from all other nodes.
*/
private final Set<String> disconnected = new HashSet<>();
@Override
public boolean disrupt(String node1, String node2) {
if (node1.equals(node2)) {
return false;
// Simulates a full node restart: disconnect this node from the simulated network, stop all
// of its services, remove it from the enclosing cluster's node map, then asynchronously
// recreate a node with the same name/id/address/roles and start it from the last applied
// cluster state.
public void restart() {
testClusterNodes.disconnectNode(this);
// capture the last applied state before stopping so the restarted node resumes from it
final ClusterState oldState = this.clusterService.state();
stop();
nodes.remove(node.getName());
scheduleSoon(() -> {
try {
// identical node identity (name/id/address/roles) so the rest of the cluster re-discovers it
final TestClusterNode restartedNode = new TestClusterNode(
new DiscoveryNode(node.getName(), node.getId(), node.getAddress(), emptyMap(),
node.getRoles(), Version.CURRENT));
nodes.put(node.getName(), restartedNode);
restartedNode.start(oldState);
} catch (IOException e) {
// node construction failure is a test bug, not an expected condition
throw new AssertionError(e);
}
});
}
// Check if both nodes are still part of the cluster
if (testClusterNodes.nodes.containsKey(node1) == false
|| testClusterNodes.nodes.containsKey(node2) == false) {
return true;
// Stops this node: disconnects it from the simulated network, then closes all node-level
// services. The coordinator can be null when start(...) was never invoked on this node.
public void stop() {
testClusterNodes.disconnectNode(this);
indicesService.close();
clusterService.close();
indicesClusterStateService.close();
if (coordinator != null) {
coordinator.close();
}
nodeEnv.close();
}
return disconnected.contains(node1) || disconnected.contains(node2);
}
public void disconnect(String node) {
disconnected.add(node);
}
public void clear() {
disconnected.clear();
// Starts the node's services from the given initial cluster state and kicks off an initial
// join so the node (re-)enters the simulated cluster. NOTE: the start order below is
// load-bearing (transport before coordination; coordinator set as publisher before the
// master service starts) — do not reorder.
public void start(ClusterState initialState) {
transportService.start();
transportService.acceptIncomingRequests();
snapshotsService.start();
snapshotShardsService.start();
// seed the in-memory persisted state from the provided state, filtered down to this node
final CoordinationState.PersistedState persistedState =
new InMemoryPersistedState(initialState.term(), stateForNode(initialState, node));
coordinator = new Coordinator(node.getName(), clusterService.getSettings(),
clusterService.getClusterSettings(), transportService, namedWriteableRegistry,
allocationService, masterService, () -> persistedState,
// seed-hosts resolver: the addresses of all master-eligible nodes currently in the test cluster
hostsResolver -> nodes.values().stream().filter(n -> n.node.isMasterNode())
.map(n -> n.node.getAddress()).collect(Collectors.toList()),
clusterService.getClusterApplierService(), Collections.emptyList(), random(),
new BatchedRerouteService(clusterService, allocationService::reroute), ElectionStrategy.DEFAULT_INSTANCE);
masterService.setClusterStatePublisher(coordinator);
coordinator.start();
masterService.start();
clusterService.getClusterApplierService().setNodeConnectionsService(
new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService));
clusterService.getClusterApplierService().start();
indicesService.start();
indicesClusterStateService.start();
coordinator.startInitialJoin();
}
}
}
}