diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java
index 135ad755359..9068ffda4de 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java
@@ -162,9 +162,8 @@ public class AnalysisConfig implements ToXContentObject, Writeable {
         }
 
         // BWC for removed per-partition normalization
-        // Version check is temporarily against the latest to satisfy CI tests
-        // TODO change to V_6_5_0 after successful backport to 6.x
-        if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
+        // TODO Remove in 7.0.0
+        if (in.getVersion().before(Version.V_6_5_0)) {
             in.readBoolean();
         }
     }
@@ -197,9 +196,8 @@ public class AnalysisConfig implements ToXContentObject, Writeable {
         }
 
         // BWC for removed per-partition normalization
-        // Version check is temporarily against the latest to satisfy CI tests
-        // TODO change to V_6_5_0 after successful backport to 6.x
-        if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
+        // TODO Remove in 7.0.0
+        if (out.getVersion().before(Version.V_6_5_0)) {
             out.writeBoolean(false);
         }
     }
diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json
index c0a13ea63a6..e1f418d5a8d 100644
--- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json
+++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json
@@ -145,7 +145,7 @@
   },
   "transform": {
     "script": {
-      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
+      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
     }
   },
   "actions": {
diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json
index a6bf7b6145c..5c0cb7f55b4 100644
--- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json
+++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json
@@ -151,7 +151,7 @@
   },
   "transform": {
     "script": {
-      "source": "void formatResults(StringBuilder message, String type, Map typeMap) {if (typeMap.empty == false) {message.append(' Node');if (typeMap.size() != 1) {message.append('s were');} else {message.append(' was');}message.append(' ').append(type).append(' [').append(typeMap.size()).append(']: ').append(typeMap.values().stream().collect(Collectors.joining(', ', '[', ']'))).append('.');}}ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;def clusterState = ctx.payload.check.hits.hits[0]._source.cluster_state;def persistentUuidToName = [:];def latestNodes = clusterState.nodes;def ephemeralUuidToPersistentUuid = [:];def payload = ['timestamp': ctx.execution_time,'updated_timestamp': ctx.execution_time,'resolved_timestamp': ctx.execution_time,'metadata': ctx.metadata.xpack,'prefix': 'Elasticsearch cluster nodes have changed!','nodes': ['hash': clusterState.nodes_hash,'added': persistentUuidToName,'removed': [:],'restarted': [:]]];for (def latestNode : latestNodes.entrySet()) {persistentUuidToName[latestNode.key] = latestNode.value.name;ephemeralUuidToPersistentUuid[latestNode.value.ephemeral_id] = latestNode.key;}def previousNodes = ctx.payload.check.hits.hits[1]._source.cluster_state.nodes;def previousPersistentUuidToName = [:];for (def previousNode : previousNodes.entrySet()){if (persistentUuidToName.containsKey(previousNode.key) == false){payload.nodes.removed[previousNode.key] = previousNode.value.name;}else{if (ephemeralUuidToPersistentUuid.containsKey(previousNode.value.ephemeral_id) == false) {payload.nodes.restarted[previousNode.key] = persistentUuidToName[previousNode.key];}persistentUuidToName.remove(previousNode.key);}}StringBuilder message = new StringBuilder();formatResults(message, 'removed', payload.nodes.removed);formatResults(message, 'added', payload.nodes.added);formatResults(message, 'restarted', payload.nodes.restarted);payload.message = message.toString().trim();return payload;"
+      "source": "void formatResults(StringBuilder message, String type, Map typeMap) {if (typeMap.empty == false) {message.append(' Node');if (typeMap.size() != 1) {message.append('s were');} else {message.append(' was');}message.append(' ').append(type).append(' [').append(typeMap.size()).append(']: ').append(typeMap.values().stream().collect(Collectors.joining(', ', '[', ']'))).append('.');}}ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;def clusterState = ctx.payload.check.hits.hits[0]._source.cluster_state;def persistentUuidToName = [:];def latestNodes = clusterState.nodes;def ephemeralUuidToPersistentUuid = [:];def payload = ['timestamp': ctx.execution_time,'updated_timestamp': ctx.execution_time,'resolved_timestamp': ctx.execution_time,'metadata': ctx.metadata.xpack,'prefix': 'Elasticsearch cluster nodes have changed!','nodes': ['hash': clusterState.nodes_hash,'added': persistentUuidToName,'removed': [:],'restarted': [:]]];for (def latestNode : latestNodes.entrySet()) {persistentUuidToName[latestNode.key] = latestNode.value.name;ephemeralUuidToPersistentUuid[latestNode.value.ephemeral_id] = latestNode.key;}def previousNodes = ctx.payload.check.hits.hits[1]._source.cluster_state.nodes;def previousPersistentUuidToName = [:];for (def previousNode : previousNodes.entrySet()){if (persistentUuidToName.containsKey(previousNode.key) == false){payload.nodes.removed[previousNode.key] = previousNode.value.name;}else{if (ephemeralUuidToPersistentUuid.containsKey(previousNode.value.ephemeral_id) == false) {payload.nodes.restarted[previousNode.key] = persistentUuidToName[previousNode.key];}persistentUuidToName.remove(previousNode.key);}}StringBuilder message = new StringBuilder();formatResults(message, 'removed', payload.nodes.removed);formatResults(message, 'added', payload.nodes.added);formatResults(message, 'restarted', payload.nodes.restarted);payload.message = message.toString().trim();return payload;"
     }
   },
   "actions": {
diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json
index 7e18c981f0f..051a3a9d409 100644
--- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json
+++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json
@@ -141,7 +141,7 @@
   },
   "transform": {
     "script": {
-      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {def versions = new ArrayList(ctx.payload.check.hits.hits[0]._source.cluster_stats.nodes.versions);Collections.sort(versions);versionMessage = 'Versions: [' + String.join(', ', versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Elasticsearch.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
+      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {def versions = new ArrayList(ctx.payload.check.hits.hits[0]._source.cluster_stats.nodes.versions);Collections.sort(versions);versionMessage = 'Versions: [' + String.join(', ', versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Elasticsearch.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
     }
   },
   "actions": {
diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json
index bf2da3ffb1d..b2acba610e1 100644
--- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json
+++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json
@@ -161,7 +161,7 @@
   },
   "transform": {
     "script": {
-      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Kibana.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
+      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Kibana.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
     }
   },
   "actions": {
diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json
index 71a0cfd46bf..cf1fdde606c 100644
--- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json
+++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json
@@ -161,7 +161,7 @@
   },
   "transform": {
     "script": {
-      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Logstash.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
+      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Logstash.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
     }
   },
   "actions": {
diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json
index a05198a15eb..7eb0d59167d 100644
--- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json
+++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json
@@ -134,7 +134,7 @@
   },
   "transform": {
     "script": {
-      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) { alertMessage = 'Update your license.';} if (ctx.vars.not_resolved) { ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) { ctx.payload.resolved_timestamp = ctx.execution_time;} } else { ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.', 'message': alertMessage, 'metadata': ctx.metadata.xpack ];} if (ctx.vars.fails_check) { ctx.payload.metadata.time = ctx.vars.expiry.toString();} ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
+      "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) { alertMessage = 'Update your license.';} if (ctx.vars.not_resolved) { ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) { ctx.payload.resolved_timestamp = ctx.execution_time;} } else { ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.', 'message': alertMessage, 'metadata': ctx.metadata.xpack ];} if (ctx.vars.fails_check) { ctx.payload.metadata.time = ctx.vars.expiry.toString();} ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
     }
   },
   "actions": {
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml
index cb036b9d13a..ba0f4d5091e 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml
@@ -1,9 +1,3 @@
----
-setup:
-  - skip:
-      version: "all"
-      reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816"
-
 ---
 "Test get old cluster job":
   - do:
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml
index 061a242a78d..3a3334f6907 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml
@@ -1,9 +1,3 @@
----
-setup:
-  - skip:
-      version: "all"
-      reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816"
-
 ---
 "Put job on the old cluster and post some data":
 
diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml
index 1da16e79cbe..bb47524b41d 100644
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml
@@ -1,8 +1,4 @@
 setup:
-  - skip:
-      version: "all"
-      reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816"
-
   - do:
       cluster.health:
         wait_for_status: green