Replace 'master' terminology with 'cluster manager' in 'modules' directory (#3328)
* Replace 'master' terminology with 'cluster manager' in modules directory
* Replace 'master' with 'cluster_manager' in yaml rest test files
* Replace master_node with cluster_manager_node in cluster.state API test

Signed-off-by: Tianli Feng <ftianli@amazon.com>
This commit is contained in:
parent e2ad4ffbe9
commit 5a02949bbe
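Every YAML hunk below follows the same stash-and-substitute pattern from the OpenSearch YAML REST test framework: a `set` directive reads a field out of the previous response and stashes it under a name, and later assertions reference the stashed value as `$name`. A minimal sketch of the renamed pattern — `some-module` is illustrative, not a module from this diff:

  - do:
      cluster.state: {}
  # Stash the cluster_manager_node field of the response under the name "cluster_manager"
  - set: { cluster_manager_node: cluster_manager }
  - do:
      nodes.info: {}
  # "$cluster_manager" expands to the stashed node id
  - contains: { nodes.$cluster_manager.modules: { name: some-module } }

The rename is mechanical: only the response path (`master_node` to `cluster_manager_node`) and the stash name (`master` to `cluster_manager`) change; the surrounding requests and assertions are untouched.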
@@ -7,10 +7,10 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: aggs-matrix-stats } }
+  - contains: { nodes.$cluster_manager.modules: { name: aggs-matrix-stats } }
@@ -5,10 +5,10 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: analysis-common } }
+  - contains: { nodes.$cluster_manager.modules: { name: analysis-common } }
@@ -5,34 +5,34 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: ingest-common } }
-  - contains: { nodes.$master.ingest.processors: { type: append } }
-  - contains: { nodes.$master.ingest.processors: { type: bytes } }
-  - contains: { nodes.$master.ingest.processors: { type: convert } }
-  - contains: { nodes.$master.ingest.processors: { type: date } }
-  - contains: { nodes.$master.ingest.processors: { type: date_index_name } }
-  - contains: { nodes.$master.ingest.processors: { type: dissect } }
-  - contains: { nodes.$master.ingest.processors: { type: dot_expander } }
-  - contains: { nodes.$master.ingest.processors: { type: fail } }
-  - contains: { nodes.$master.ingest.processors: { type: foreach } }
-  - contains: { nodes.$master.ingest.processors: { type: grok } }
-  - contains: { nodes.$master.ingest.processors: { type: gsub } }
-  - contains: { nodes.$master.ingest.processors: { type: html_strip } }
-  - contains: { nodes.$master.ingest.processors: { type: join } }
-  - contains: { nodes.$master.ingest.processors: { type: json } }
-  - contains: { nodes.$master.ingest.processors: { type: kv } }
-  - contains: { nodes.$master.ingest.processors: { type: lowercase } }
-  - contains: { nodes.$master.ingest.processors: { type: remove } }
-  - contains: { nodes.$master.ingest.processors: { type: rename } }
-  - contains: { nodes.$master.ingest.processors: { type: script } }
-  - contains: { nodes.$master.ingest.processors: { type: set } }
-  - contains: { nodes.$master.ingest.processors: { type: sort } }
-  - contains: { nodes.$master.ingest.processors: { type: split } }
-  - contains: { nodes.$master.ingest.processors: { type: trim } }
-  - contains: { nodes.$master.ingest.processors: { type: uppercase } }
+  - contains: { nodes.$cluster_manager.modules: { name: ingest-common } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: append } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: bytes } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: convert } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: date } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: date_index_name } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: dissect } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: dot_expander } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: fail } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: foreach } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: grok } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: gsub } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: html_strip } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: join } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: json } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: kv } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: lowercase } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: remove } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: rename } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: script } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: set } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: sort } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: split } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: trim } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: uppercase } }
@@ -77,21 +77,21 @@ teardown:

   - do:
       cluster.state: {}
-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.stats:
         metric: [ ingest ]
   #we can't assert anything here since we might have more than one node in the cluster
-  - gte: {nodes.$master.ingest.total.count: 0}
-  - gte: {nodes.$master.ingest.total.failed: 0}
-  - gte: {nodes.$master.ingest.total.time_in_millis: 0}
-  - match: {nodes.$master.ingest.total.current: 0}
-  - gte: {nodes.$master.ingest.pipelines.pipeline1.count: 0}
-  - match: {nodes.$master.ingest.pipelines.pipeline1.failed: 0}
-  - gte: {nodes.$master.ingest.pipelines.pipeline1.time_in_millis: 0}
-  - match: {nodes.$master.ingest.pipelines.pipeline1.current: 0}
+  - gte: {nodes.$cluster_manager.ingest.total.count: 0}
+  - gte: {nodes.$cluster_manager.ingest.total.failed: 0}
+  - gte: {nodes.$cluster_manager.ingest.total.time_in_millis: 0}
+  - match: {nodes.$cluster_manager.ingest.total.current: 0}
+  - gte: {nodes.$cluster_manager.ingest.pipelines.pipeline1.count: 0}
+  - match: {nodes.$cluster_manager.ingest.pipelines.pipeline1.failed: 0}
+  - gte: {nodes.$cluster_manager.ingest.pipelines.pipeline1.time_in_millis: 0}
+  - match: {nodes.$cluster_manager.ingest.pipelines.pipeline1.current: 0}

 ---
 "Test bulk request with default pipeline":
@@ -113,21 +113,21 @@ teardown:

   - do:
       cluster.state: {}
-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.stats:
         metric: [ ingest ]
   #we can't assert anything here since we might have more than one node in the cluster
-  - gte: {nodes.$master.ingest.total.count: 0}
-  - gte: {nodes.$master.ingest.total.failed: 0}
-  - gte: {nodes.$master.ingest.total.time_in_millis: 0}
-  - match: {nodes.$master.ingest.total.current: 0}
-  - gte: {nodes.$master.ingest.pipelines.pipeline2.count: 0}
-  - match: {nodes.$master.ingest.pipelines.pipeline2.failed: 0}
-  - gte: {nodes.$master.ingest.pipelines.pipeline2.time_in_millis: 0}
-  - match: {nodes.$master.ingest.pipelines.pipeline2.current: 0}
+  - gte: {nodes.$cluster_manager.ingest.total.count: 0}
+  - gte: {nodes.$cluster_manager.ingest.total.failed: 0}
+  - gte: {nodes.$cluster_manager.ingest.total.time_in_millis: 0}
+  - match: {nodes.$cluster_manager.ingest.total.current: 0}
+  - gte: {nodes.$cluster_manager.ingest.pipelines.pipeline2.count: 0}
+  - match: {nodes.$cluster_manager.ingest.pipelines.pipeline2.failed: 0}
+  - gte: {nodes.$cluster_manager.ingest.pipelines.pipeline2.time_in_millis: 0}
+  - match: {nodes.$cluster_manager.ingest.pipelines.pipeline2.current: 0}

   - do:
       get:
@@ -71,7 +71,7 @@ class DatabaseReaderLazyLoader implements Closeable {

     /**
      * Read the database type from the database. We do this manually instead of relying on the built-in mechanism to avoid reading the
-     * entire database into memory merely to read the type. This is especially important to maintain on master nodes where pipelines are
+     * entire database into memory merely to read the type. This is especially important to maintain on cluster-manager nodes where pipelines are
      * validated. If we read the entire database into memory, we could potentially run into low-memory constraints on such nodes where
      * loading this data would otherwise be wasteful if they are not also ingest nodes.
      *
@@ -5,10 +5,10 @@
   - do:
       cluster.state: {}

-  - set: {master_node: master}
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: ingest-geoip } }
-  - contains: { nodes.$master.ingest.processors: { type: geoip } }
+  - contains: { nodes.$cluster_manager.modules: { name: ingest-geoip } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: geoip } }
@@ -5,10 +5,10 @@
   - do:
       cluster.state: {}

-  - set: {master_node: master}
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: ingest-user-agent } }
-  - contains: { nodes.$master.ingest.processors: { type: user_agent } }
+  - contains: { nodes.$cluster_manager.modules: { name: ingest-user-agent } }
+  - contains: { nodes.$cluster_manager.ingest.processors: { type: user_agent } }
@@ -7,10 +7,10 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: lang-expression } }
+  - contains: { nodes.$cluster_manager.modules: { name: lang-expression } }
@@ -7,10 +7,10 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: lang-mustache } }
+  - contains: { nodes.$cluster_manager.modules: { name: lang-mustache } }
@@ -7,10 +7,10 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: lang-painless } }
+  - contains: { nodes.$cluster_manager.modules: { name: lang-painless } }
@@ -229,7 +229,7 @@
         },
         "objects": {
           "properties": {
-            "master": {
+            "primary": {
               "type": "long"
             },
             "total": {
@@ -4601,7 +4601,7 @@
         "connected_replicas": {
           "type": "long"
         },
-        "master_offset": {
+        "primary_offset": {
          "type": "long"
        },
        "role": {
@@ -227,7 +227,7 @@ public class ChildrenIT extends AbstractParentChildTestCase {

     public void testPostCollection() throws Exception {
         String indexName = "prodcatalog";
-        String masterType = "masterprod";
+        String mainType = "mainprod";
         String childType = "variantsku";
         assertAcked(
             prepareCreate(indexName).setSettings(
@@ -235,7 +235,7 @@ public class ChildrenIT extends AbstractParentChildTestCase {
             )
                 .setMapping(
                     addFieldMappings(
-                        buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, masterType, childType),
+                        buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, mainType, childType),
                         "brand",
                         "text",
                         "name",
@@ -251,7 +251,7 @@ public class ChildrenIT extends AbstractParentChildTestCase {
         );

         List<IndexRequestBuilder> requests = new ArrayList<>();
-        requests.add(createIndexRequest(indexName, masterType, "1", null, "brand", "Levis", "name", "Style 501", "material", "Denim"));
+        requests.add(createIndexRequest(indexName, mainType, "1", null, "brand", "Levis", "name", "Style 501", "material", "Denim"));
         requests.add(createIndexRequest(indexName, childType, "3", "1", "color", "blue", "size", "32"));
         requests.add(createIndexRequest(indexName, childType, "4", "1", "color", "blue", "size", "34"));
         requests.add(createIndexRequest(indexName, childType, "5", "1", "color", "blue", "size", "36"));
@@ -259,9 +259,7 @@ public class ChildrenIT extends AbstractParentChildTestCase {
         requests.add(createIndexRequest(indexName, childType, "7", "1", "color", "black", "size", "40"));
         requests.add(createIndexRequest(indexName, childType, "8", "1", "color", "gray", "size", "36"));

-        requests.add(
-            createIndexRequest(indexName, masterType, "2", null, "brand", "Wrangler", "name", "Regular Cut", "material", "Leather")
-        );
+        requests.add(createIndexRequest(indexName, mainType, "2", null, "brand", "Wrangler", "name", "Regular Cut", "material", "Leather"));
         requests.add(createIndexRequest(indexName, childType, "9", "2", "color", "blue", "size", "32"));
         requests.add(createIndexRequest(indexName, childType, "10", "2", "color", "blue", "size", "34"));
         requests.add(createIndexRequest(indexName, childType, "12", "2", "color", "black", "size", "36"));
@@ -118,18 +118,18 @@ public class RetryTests extends OpenSearchIntegTestCase {
     public void testReindexFromRemote() throws Exception {
         Function<Client, AbstractBulkByScrollRequestBuilder<?, ?>> function = client -> {
             /*
-             * Use the master node for the reindex from remote because that node
+             * Use the cluster-manager node for the reindex from remote because that node
              * doesn't have a copy of the data on it.
              */
-            NodeInfo masterNode = null;
+            NodeInfo clusterManagerNode = null;
             for (NodeInfo candidate : client.admin().cluster().prepareNodesInfo().get().getNodes()) {
                 if (candidate.getNode().isMasterNode()) {
-                    masterNode = candidate;
+                    clusterManagerNode = candidate;
                 }
             }
-            assertNotNull(masterNode);
+            assertNotNull(clusterManagerNode);

-            TransportAddress address = masterNode.getInfo(HttpInfo.class).getAddress().publishAddress();
+            TransportAddress address = clusterManagerNode.getInfo(HttpInfo.class).getAddress().publishAddress();
             RemoteInfo remote = new RemoteInfo(
                 "http",
                 address.getAddress(),
@@ -262,8 +262,8 @@ public class RetryTests extends OpenSearchIntegTestCase {
      */
     private BulkByScrollTask.Status taskStatus(String action) {
         /*
-         * We always use the master client because we always start the test requests on the
-         * master. We do this simply to make sure that the test request is not started on the
+         * We always use the cluster-manager client because we always start the test requests on the
+         * cluster-manager. We do this simply to make sure that the test request is not started on the
          * node who's queue we're manipulating.
          */
         ListTasksResponse response = client().admin().cluster().prepareListTasks().setActions(action).setDetailed(true).get();
@@ -7,15 +7,15 @@
         body: { "text": "test" }
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
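Each reindex-from-remote hunk in this file stops at the `reindex:` call, so the stashed `$host` is consumed just past the edge of the diff. A minimal sketch of how such a request body typically continues — the `source`/`dest` index names are assumptions for illustration, not content from this diff:

  - do:
      reindex:
        refresh: true
        body:
          source:
            remote:
              # ${host} substitutes the cluster-manager's stashed publish address
              host: http://${host}
            index: source
          dest:
            index: dest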
@@ -68,15 +68,15 @@
   - do:
       indices.refresh: {}

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -118,15 +118,15 @@
         routing: foo
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -169,15 +169,15 @@
         body: { "text": "test" }
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -236,15 +236,15 @@
         body: { "text": "test" }
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -302,15 +302,15 @@
         body: { "text": "test" }
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -358,15 +358,15 @@
         body: { "text": "test" }
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       catch: /query malformed, no start_object after query name/
       reindex:
@@ -411,15 +411,15 @@
         body: { "text": "test", "filtered": "removed" }
         refresh: true

-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -480,15 +480,15 @@
       indices.refresh: {}


-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         requests_per_second: .00000001 # About 9.5 years to complete the request
@@ -88,15 +88,15 @@ setup:

 ---
 "Reindex from remote with parent join field":
-  # Fetch the http host. We use the host of the master because we know there will always be a master.
+  # Fetch the http host. We use the host of the cluster-manager because we know there will always be a cluster-manager.
   - do:
       cluster.state: {}
-  - set: { master_node: master }
+  - set: { cluster_manager_node: cluster_manager }
   - do:
       nodes.info:
         metric: [ http ]
-  - is_true: nodes.$master.http.publish_address
-  - set: {nodes.$master.http.publish_address: host}
+  - is_true: nodes.$cluster_manager.http.publish_address
+  - set: {nodes.$cluster_manager.http.publish_address: host}
   - do:
       reindex:
         refresh: true
@@ -102,13 +102,13 @@ teardown:
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: repository-url } }
+  - contains: { nodes.$cluster_manager.modules: { name: repository-url } }

 ---
 "Restore with repository-url using http://":
@@ -7,13 +7,13 @@
   - do:
       cluster.state: {}

-  # Get master node id
-  - set: { master_node: master }
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }

   - do:
       nodes.info: {}

-  - contains: { nodes.$master.modules: { name: transport-netty4 } }
+  - contains: { nodes.$cluster_manager.modules: { name: transport-netty4 } }

   - do:
       cluster.stats: {}