Use special XContent registry for node tool (#54050)

Fixes an issue where the elasticsearch-node command-line tools would not work correctly
because PersistentTasksCustomMetaData contains named XContent from plugins. This PR
makes it so that the parsing of all custom metadata is skipped, even if the core system would
know how to handle it.

Closes #53549
This commit is contained in:
Yannick Welsch 2020-03-24 17:21:39 +01:00
parent 88b1b2f36f
commit e006d1f6cf
15 changed files with 327 additions and 251 deletions

View File

@@ -25,31 +25,36 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.rollover.Condition;
import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException; import org.elasticsearch.cli.UserException;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.NodeMetaData; import org.elasticsearch.env.NodeMetaData;
import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.gateway.PersistedClusterStateService;
import org.elasticsearch.indices.IndicesModule;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class);
@@ -67,10 +72,34 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
protected static final String CS_MISSING_MSG = protected static final String CS_MISSING_MSG =
"cluster state is empty, cluster has never been bootstrapped?"; "cluster state is empty, cluster has never been bootstrapped?";
protected static final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry( // fake the registry here, as command-line tools are not loading plugins, and ensure that it preserves the parsed XContent
Stream.of(ClusterModule.getNamedXWriteables().stream(), IndicesModule.getNamedXContents().stream()) public static final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList()) {
.flatMap(Function.identity())
.collect(Collectors.toList())); @SuppressWarnings("unchecked")
@Override
public <T, C> T parseNamedObject(Class<T> categoryClass, String name, XContentParser parser, C context) throws IOException {
// Currently, two unknown top-level objects are present
if (MetaData.Custom.class.isAssignableFrom(categoryClass)) {
return (T) new UnknownMetaDataCustom(name, parser.mapOrdered());
}
if (Condition.class.isAssignableFrom(categoryClass)) {
// The parsing for conditions is a bit weird as these represent JSON primitives (strings or numbers)
// TODO: Make Condition non-pluggable
assert parser.currentToken() == XContentParser.Token.FIELD_NAME : parser.currentToken();
if (parser.currentToken() != XContentParser.Token.FIELD_NAME) {
throw new UnsupportedOperationException("Unexpected token for Condition: " + parser.currentToken());
}
parser.nextToken();
assert parser.currentToken().isValue() : parser.currentToken();
if (parser.currentToken().isValue() == false) {
throw new UnsupportedOperationException("Unexpected token for Condition: " + parser.currentToken());
}
return (T) new UnknownCondition(name, parser.objectText());
}
assert false : "Unexpected category class " + categoryClass + " for name " + name;
throw new UnsupportedOperationException("Unexpected category class " + categoryClass + " for name " + name);
}
};
public ElasticsearchNodeCommand(String description) { public ElasticsearchNodeCommand(String description) {
super(description); super(description);
@@ -86,7 +115,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
String nodeId = nodeMetaData.nodeId(); String nodeId = nodeMetaData.nodeId();
return new PersistedClusterStateService(dataPaths, nodeId, namedXContentRegistry, BigArrays.NON_RECYCLING_INSTANCE, return new PersistedClusterStateService(dataPaths, nodeId, namedXContentRegistry, BigArrays.NON_RECYCLING_INSTANCE,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true); new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);
} }
public static ClusterState clusterState(Environment environment, PersistedClusterStateService.OnDiskState onDiskState) { public static ClusterState clusterState(Environment environment, PersistedClusterStateService.OnDiskState onDiskState) {
@@ -176,4 +205,78 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
OptionParser getParser() { OptionParser getParser() {
return parser; return parser;
} }
public static class UnknownMetaDataCustom implements MetaData.Custom {
private final String name;
private final Map<String, Object> contents;
public UnknownMetaDataCustom(String name, Map<String, Object> contents) {
this.name = name;
this.contents = contents;
}
@Override
public EnumSet<MetaData.XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.GATEWAY);
}
@Override
public Diff<MetaData.Custom> diff(MetaData.Custom previousState) {
assert false;
throw new UnsupportedOperationException();
}
@Override
public String getWriteableName() {
return name;
}
@Override
public Version getMinimalSupportedVersion() {
assert false;
throw new UnsupportedOperationException();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
assert false;
throw new UnsupportedOperationException();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.mapContents(contents);
}
}
public static class UnknownCondition extends Condition<Object> {
public UnknownCondition(String name, Object value) {
super(name);
this.value = value;
}
@Override
public String getWriteableName() {
return name;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
assert false;
throw new UnsupportedOperationException();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.field(name, value);
}
@Override
public Result evaluate(Stats stats) {
assert false;
throw new UnsupportedOperationException();
}
}
} }

View File

@@ -866,7 +866,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
} }
public static MetaData fromXContent(XContentParser parser) throws IOException { public static MetaData fromXContent(XContentParser parser) throws IOException {
return Builder.fromXContent(parser, false); return Builder.fromXContent(parser);
} }
@Override @Override
@@ -1504,7 +1504,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
builder.endObject(); builder.endObject();
} }
public static MetaData fromXContent(XContentParser parser, boolean preserveUnknownCustoms) throws IOException { public static MetaData fromXContent(XContentParser parser) throws IOException {
Builder builder = new Builder(); Builder builder = new Builder();
// we might get here after the meta-data element, or on a fresh parser // we might get here after the meta-data element, or on a fresh parser
@@ -1554,15 +1554,10 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
Custom custom = parser.namedObject(Custom.class, currentFieldName, null); Custom custom = parser.namedObject(Custom.class, currentFieldName, null);
builder.putCustom(custom.getWriteableName(), custom); builder.putCustom(custom.getWriteableName(), custom);
} catch (NamedObjectNotFoundException ex) { } catch (NamedObjectNotFoundException ex) {
if (preserveUnknownCustoms) {
logger.warn("Adding unknown custom object with type {}", currentFieldName);
builder.putCustom(currentFieldName, new UnknownGatewayOnlyCustom(parser.mapOrdered()));
} else {
logger.warn("Skipping unknown custom object with type {}", currentFieldName); logger.warn("Skipping unknown custom object with type {}", currentFieldName);
parser.skipChildren(); parser.skipChildren();
} }
} }
}
} else if (token.isValue()) { } else if (token.isValue()) {
if ("version".equals(currentFieldName)) { if ("version".equals(currentFieldName)) {
builder.version = parser.longValue(); builder.version = parser.longValue();
@@ -1581,45 +1576,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
} }
} }
public static class UnknownGatewayOnlyCustom implements Custom {
private final Map<String, Object> contents;
UnknownGatewayOnlyCustom(Map<String, Object> contents) {
this.contents = contents;
}
@Override
public EnumSet<XContentContext> context() {
return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.GATEWAY);
}
@Override
public Diff<Custom> diff(Custom previousState) {
throw new UnsupportedOperationException();
}
@Override
public String getWriteableName() {
throw new UnsupportedOperationException();
}
@Override
public Version getMinimalSupportedVersion() {
throw new UnsupportedOperationException();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.mapContents(contents);
}
}
private static final ToXContent.Params FORMAT_PARAMS; private static final ToXContent.Params FORMAT_PARAMS;
static { static {
Map<String, String> params = new HashMap<>(2); Map<String, String> params = new HashMap<>(2);
@@ -1631,15 +1587,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
/** /**
* State format for {@link MetaData} to write to and load from disk * State format for {@link MetaData} to write to and load from disk
*/ */
public static final MetaDataStateFormat<MetaData> FORMAT = createMetaDataStateFormat(false); public static final MetaDataStateFormat<MetaData> FORMAT = new MetaDataStateFormat<MetaData>(GLOBAL_STATE_FILE_PREFIX) {
/**
* Special state format for {@link MetaData} to write to and load from disk, preserving unknown customs
*/
public static final MetaDataStateFormat<MetaData> FORMAT_PRESERVE_CUSTOMS = createMetaDataStateFormat(true);
private static MetaDataStateFormat<MetaData> createMetaDataStateFormat(boolean preserveUnknownCustoms) {
return new MetaDataStateFormat<MetaData>(GLOBAL_STATE_FILE_PREFIX) {
@Override @Override
public void toXContent(XContentBuilder builder, MetaData state) throws IOException { public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
@@ -1648,8 +1596,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
@Override @Override
public MetaData fromXContent(XContentParser parser) throws IOException { public MetaData fromXContent(XContentParser parser) throws IOException {
return Builder.fromXContent(parser, preserveUnknownCustoms); return Builder.fromXContent(parser);
} }
}; };
}
} }

View File

@@ -136,7 +136,6 @@ public class PersistedClusterStateService {
private final String nodeId; private final String nodeId;
private final NamedXContentRegistry namedXContentRegistry; private final NamedXContentRegistry namedXContentRegistry;
private final BigArrays bigArrays; private final BigArrays bigArrays;
private final boolean preserveUnknownCustoms;
private final LongSupplier relativeTimeMillisSupplier; private final LongSupplier relativeTimeMillisSupplier;
private volatile TimeValue slowWriteLoggingThreshold; private volatile TimeValue slowWriteLoggingThreshold;
@@ -144,18 +143,16 @@ public class PersistedClusterStateService {
public PersistedClusterStateService(NodeEnvironment nodeEnvironment, NamedXContentRegistry namedXContentRegistry, BigArrays bigArrays, public PersistedClusterStateService(NodeEnvironment nodeEnvironment, NamedXContentRegistry namedXContentRegistry, BigArrays bigArrays,
ClusterSettings clusterSettings, LongSupplier relativeTimeMillisSupplier) { ClusterSettings clusterSettings, LongSupplier relativeTimeMillisSupplier) {
this(nodeEnvironment.nodeDataPaths(), nodeEnvironment.nodeId(), namedXContentRegistry, bigArrays, clusterSettings, this(nodeEnvironment.nodeDataPaths(), nodeEnvironment.nodeId(), namedXContentRegistry, bigArrays, clusterSettings,
relativeTimeMillisSupplier, false); relativeTimeMillisSupplier);
} }
public PersistedClusterStateService(Path[] dataPaths, String nodeId, NamedXContentRegistry namedXContentRegistry, BigArrays bigArrays, public PersistedClusterStateService(Path[] dataPaths, String nodeId, NamedXContentRegistry namedXContentRegistry, BigArrays bigArrays,
ClusterSettings clusterSettings, LongSupplier relativeTimeMillisSupplier, ClusterSettings clusterSettings, LongSupplier relativeTimeMillisSupplier) {
boolean preserveUnknownCustoms) {
this.dataPaths = dataPaths; this.dataPaths = dataPaths;
this.nodeId = nodeId; this.nodeId = nodeId;
this.namedXContentRegistry = namedXContentRegistry; this.namedXContentRegistry = namedXContentRegistry;
this.bigArrays = bigArrays; this.bigArrays = bigArrays;
this.relativeTimeMillisSupplier = relativeTimeMillisSupplier; this.relativeTimeMillisSupplier = relativeTimeMillisSupplier;
this.preserveUnknownCustoms = preserveUnknownCustoms;
this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD);
clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold);
} }
@@ -383,8 +380,7 @@ public class PersistedClusterStateService {
consumeFromType(searcher, GLOBAL_TYPE_NAME, bytes -> consumeFromType(searcher, GLOBAL_TYPE_NAME, bytes ->
{ {
final MetaData metaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.SMILE) final MetaData metaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.SMILE)
.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes.bytes, bytes.offset, bytes.length), .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes.bytes, bytes.offset, bytes.length));
preserveUnknownCustoms);
logger.trace("found global metadata with last-accepted term [{}]", metaData.coordinationMetaData().term()); logger.trace("found global metadata with last-accepted term [{}]", metaData.coordinationMetaData().term());
if (builderReference.get() != null) { if (builderReference.get() != null) {
throw new IllegalStateException("duplicate global metadata found in [" + dataPath + "]"); throw new IllegalStateException("duplicate global metadata found in [" + dataPath + "]");

View File

@@ -0,0 +1,151 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.coordination;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.Index;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
public class ElasticsearchNodeCommandTests extends ESTestCase {
public void testLoadStateWithoutMissingCustoms() throws IOException {
runLoadStateTest(false, false);
}
public void testLoadStateWithoutMissingCustomsButPreserved() throws IOException {
runLoadStateTest(false, true);
}
public void testLoadStateWithMissingCustomsButPreserved() throws IOException {
runLoadStateTest(true, true);
}
public void testLoadStateWithMissingCustomsAndNotPreserved() throws IOException {
runLoadStateTest(true, false);
}
private void runLoadStateTest(boolean hasMissingCustoms, boolean preserveUnknownCustoms) throws IOException {
final MetaData latestMetaData = randomMeta();
final XContentBuilder builder = JsonXContent.contentBuilder();
builder.startObject();
latestMetaData.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
MetaData loadedMetaData;
try (XContentParser parser = createParser(hasMissingCustoms ? ElasticsearchNodeCommand.namedXContentRegistry : xContentRegistry(),
JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
loadedMetaData = MetaData.fromXContent(parser);
}
assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_")));
assertThat(loadedMetaData.clusterUUID(), equalTo(latestMetaData.clusterUUID()));
ImmutableOpenMap<String, IndexMetaData> indices = loadedMetaData.indices();
assertThat(indices.size(), equalTo(latestMetaData.indices().size()));
for (IndexMetaData original : latestMetaData) {
IndexMetaData deserialized = indices.get(original.getIndex().getName());
assertThat(deserialized, notNullValue());
assertThat(deserialized.getVersion(), equalTo(original.getVersion()));
assertThat(deserialized.getMappingVersion(), equalTo(original.getMappingVersion()));
assertThat(deserialized.getSettingsVersion(), equalTo(original.getSettingsVersion()));
assertThat(deserialized.getNumberOfReplicas(), equalTo(original.getNumberOfReplicas()));
assertThat(deserialized.getNumberOfShards(), equalTo(original.getNumberOfShards()));
}
// make sure the index tombstones are the same too
if (hasMissingCustoms) {
assertNotNull(loadedMetaData.custom(IndexGraveyard.TYPE));
assertThat(loadedMetaData.custom(IndexGraveyard.TYPE), instanceOf(ElasticsearchNodeCommand.UnknownMetaDataCustom.class));
if (preserveUnknownCustoms) {
// check that we reserialize unknown metadata correctly again
final Path tempdir = createTempDir();
MetaData.FORMAT.write(loadedMetaData, tempdir);
final MetaData reloadedMetaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), tempdir);
assertThat(reloadedMetaData.indexGraveyard(), equalTo(latestMetaData.indexGraveyard()));
}
} else {
assertThat(loadedMetaData.indexGraveyard(), equalTo(latestMetaData.indexGraveyard()));
}
}
private MetaData randomMeta() {
int numIndices = randomIntBetween(1, 10);
MetaData.Builder mdBuilder = MetaData.builder();
mdBuilder.generateClusterUuidIfNeeded();
for (int i = 0; i < numIndices; i++) {
mdBuilder.put(indexBuilder(randomAlphaOfLength(10) + "idx-"+i));
}
int numDelIndices = randomIntBetween(0, 5);
final IndexGraveyard.Builder graveyard = IndexGraveyard.builder();
for (int i = 0; i < numDelIndices; i++) {
graveyard.addTombstone(new Index(randomAlphaOfLength(10) + "del-idx-" + i, UUIDs.randomBase64UUID()));
}
mdBuilder.indexGraveyard(graveyard.build());
return mdBuilder.build();
}
private IndexMetaData.Builder indexBuilder(String index) {
return IndexMetaData.builder(index)
.settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)))
.putRolloverInfo(new RolloverInfo("test", randomSubsetOf(Arrays.asList(
new MaxAgeCondition(TimeValue.timeValueSeconds(100)),
new MaxDocsCondition(100L),
new MaxSizeCondition(new ByteSizeValue(100))
)), 0));
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(Stream.of(ClusterModule.getNamedXWriteables().stream(), IndicesModule.getNamedXContents().stream())
.flatMap(Function.identity())
.collect(Collectors.toList()));
}
}

View File

@@ -399,7 +399,7 @@ public class MetaDataTests extends ESTestCase {
.endObject() .endObject()
.endObject()); .endObject());
try (XContentParser parser = createParser(JsonXContent.jsonXContent, metadata)) { try (XContentParser parser = createParser(JsonXContent.jsonXContent, metadata)) {
MetaData.Builder.fromXContent(parser, randomBoolean()); MetaData.Builder.fromXContent(parser);
fail(); fail();
} catch (IllegalArgumentException e) { } catch (IllegalArgumentException e) {
assertEquals("Unexpected field [random]", e.getMessage()); assertEquals("Unexpected field [random]", e.getMessage());

View File

@@ -164,7 +164,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
String metaDataSource = MetaData.Builder.toXContent(metaData); String metaDataSource = MetaData.Builder.toXContent(metaData);
MetaData parsedMetaData = MetaData.Builder.fromXContent(createParser(JsonXContent.jsonXContent, metaDataSource), false); MetaData parsedMetaData = MetaData.Builder.fromXContent(createParser(JsonXContent.jsonXContent, metaDataSource));
IndexMetaData indexMetaData = parsedMetaData.index("test1"); IndexMetaData indexMetaData = parsedMetaData.index("test1");
assertThat(indexMetaData.primaryTerm(0), equalTo(1L)); assertThat(indexMetaData.primaryTerm(0), equalTo(1L));

View File

@@ -72,7 +72,7 @@ public class NodeRepurposeCommandTests extends ESTestCase {
final String nodeId = randomAlphaOfLength(10); final String nodeId = randomAlphaOfLength(10);
try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(nodePaths, nodeId, try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(nodePaths, nodeId,
xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true).createWriter()) { new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L).createWriter()) {
writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE); writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE);
} }
} }

View File

@@ -58,7 +58,7 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(nodePaths, nodeId, try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(nodePaths, nodeId,
xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true).createWriter()) { new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L).createWriter()) {
writer.writeFullStateAndCommit(1L, ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder() writer.writeFullStateAndCommit(1L, ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder()
.persistentSettings(Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build()).build()) .persistentSettings(Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build()).build())
.build()); .build());
@@ -70,7 +70,7 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
public void checkClusterStateIntact() throws IOException { public void checkClusterStateIntact() throws IOException {
assertTrue(MetaData.SETTING_READ_ONLY_SETTING.get(new PersistedClusterStateService(nodePaths, nodeId, assertTrue(MetaData.SETTING_READ_ONLY_SETTING.get(new PersistedClusterStateService(nodePaths, nodeId,
xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true) new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L)
.loadBestOnDiskState().metaData.persistentSettings())); .loadBestOnDiskState().metaData.persistentSettings()));
} }

View File

@@ -27,23 +27,12 @@ import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import java.io.IOException; import java.io.IOException;
@@ -55,19 +44,12 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.nio.file.StandardOpenOption; import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.stream.StreamSupport; import java.util.stream.StreamSupport;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.notNullValue;
@LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS
@@ -85,7 +67,7 @@ public class MetaDataStateFormatTests extends ESTestCase {
@Override @Override
public MetaData fromXContent(XContentParser parser) throws IOException { public MetaData fromXContent(XContentParser parser) throws IOException {
return MetaData.Builder.fromXContent(parser, false); return MetaData.Builder.fromXContent(parser);
} }
}; };
Path tmp = createTempDir(); Path tmp = createTempDir();
@@ -238,99 +220,6 @@ public class MetaDataStateFormatTests extends ESTestCase {
} }
} }
public void testLoadStateWithoutMissingCustoms() throws IOException {
runLoadStateTest(false, false);
}
public void testLoadStateWithoutMissingCustomsButPreserved() throws IOException {
runLoadStateTest(false, true);
}
public void testLoadStateWithMissingCustomsButPreserved() throws IOException {
runLoadStateTest(true, true);
}
public void testLoadStateWithMissingCustomsAndNotPreserved() throws IOException {
runLoadStateTest(true, false);
}
private void runLoadStateTest(boolean hasMissingCustoms, boolean preserveUnknownCustoms) throws IOException {
final Path[] dirs = new Path[randomIntBetween(1, 5)];
int numStates = randomIntBetween(1, 5);
List<MetaData> meta = new ArrayList<>();
for (int i = 0; i < numStates; i++) {
meta.add(randomMeta());
}
Set<Path> corruptedFiles = new HashSet<>();
MetaDataStateFormat<MetaData> format = metaDataFormat(preserveUnknownCustoms);
for (int i = 0; i < dirs.length; i++) {
dirs[i] = createTempDir();
Files.createDirectories(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME));
for (int j = 0; j < numStates; j++) {
format.writeAndCleanup(meta.get(j), dirs[i]);
if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { // corrupt a file that we do not necessarily
// need here....
Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j + ".st");
corruptedFiles.add(file);
MetaDataStateFormatTests.corruptFile(file, logger);
}
}
}
List<Path> dirList = Arrays.asList(dirs);
Collections.shuffle(dirList, random());
MetaData loadedMetaData = format.loadLatestState(logger, hasMissingCustoms ?
xContentRegistryWithMissingCustoms() : xContentRegistry(), dirList.toArray(new Path[0]));
MetaData latestMetaData = meta.get(numStates-1);
assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_")));
assertThat(loadedMetaData.clusterUUID(), equalTo(latestMetaData.clusterUUID()));
ImmutableOpenMap<String, IndexMetaData> indices = loadedMetaData.indices();
assertThat(indices.size(), equalTo(latestMetaData.indices().size()));
for (IndexMetaData original : latestMetaData) {
IndexMetaData deserialized = indices.get(original.getIndex().getName());
assertThat(deserialized, notNullValue());
assertThat(deserialized.getVersion(), equalTo(original.getVersion()));
assertThat(deserialized.getMappingVersion(), equalTo(original.getMappingVersion()));
assertThat(deserialized.getSettingsVersion(), equalTo(original.getSettingsVersion()));
assertThat(deserialized.getNumberOfReplicas(), equalTo(original.getNumberOfReplicas()));
assertThat(deserialized.getNumberOfShards(), equalTo(original.getNumberOfShards()));
}
// make sure the index tombstones are the same too
if (hasMissingCustoms) {
if (preserveUnknownCustoms) {
assertNotNull(loadedMetaData.custom(IndexGraveyard.TYPE));
assertThat(loadedMetaData.custom(IndexGraveyard.TYPE), instanceOf(MetaData.UnknownGatewayOnlyCustom.class));
// check that we reserialize unknown metadata correctly again
final Path tempdir = createTempDir();
metaDataFormat(randomBoolean()).write(loadedMetaData, tempdir);
final MetaData reloadedMetaData = metaDataFormat(randomBoolean()).loadLatestState(logger, xContentRegistry(), tempdir);
assertThat(reloadedMetaData.indexGraveyard(), equalTo(latestMetaData.indexGraveyard()));
} else {
assertNotNull(loadedMetaData.indexGraveyard());
assertThat(loadedMetaData.indexGraveyard().getTombstones(), hasSize(0));
}
} else {
assertThat(loadedMetaData.indexGraveyard(), equalTo(latestMetaData.indexGraveyard()));
}
// now corrupt all the latest ones and make sure we fail to load the state
for (int i = 0; i < dirs.length; i++) {
Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + (numStates-1) + ".st");
if (corruptedFiles.contains(file)) {
continue;
}
MetaDataStateFormatTests.corruptFile(file, logger);
}
try {
format.loadLatestState(logger, xContentRegistry(), dirList.toArray(new Path[0]));
fail("latest version can not be read");
} catch (ElasticsearchException ex) {
assertThat(ExceptionsHelper.unwrap(ex, CorruptStateException.class), notNullValue());
}
}
private DummyState writeAndReadStateSuccessfully(Format format, Path... paths) throws IOException { private DummyState writeAndReadStateSuccessfully(Format format, Path... paths) throws IOException {
format.noFailures(); format.noFailures();
DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 100), randomInt(), randomLong(), DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 100), randomInt(), randomLong(),
@ -457,43 +346,6 @@ public class MetaDataStateFormatTests extends ESTestCase {
writeAndReadStateSuccessfully(format, paths); writeAndReadStateSuccessfully(format, paths);
} }
private static MetaDataStateFormat<MetaData> metaDataFormat(boolean preserveUnknownCustoms) {
return new MetaDataStateFormat<MetaData>(MetaData.GLOBAL_STATE_FILE_PREFIX) {
@Override
public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
MetaData.Builder.toXContent(state, builder, ToXContent.EMPTY_PARAMS);
}
@Override
public MetaData fromXContent(XContentParser parser) throws IOException {
return MetaData.Builder.fromXContent(parser, preserveUnknownCustoms);
}
};
}
private MetaData randomMeta() throws IOException {
int numIndices = randomIntBetween(1, 10);
MetaData.Builder mdBuilder = MetaData.builder();
mdBuilder.generateClusterUuidIfNeeded();
for (int i = 0; i < numIndices; i++) {
mdBuilder.put(indexBuilder(randomAlphaOfLength(10) + "idx-"+i));
}
int numDelIndices = randomIntBetween(0, 5);
final IndexGraveyard.Builder graveyard = IndexGraveyard.builder();
for (int i = 0; i < numDelIndices; i++) {
graveyard.addTombstone(new Index(randomAlphaOfLength(10) + "del-idx-" + i, UUIDs.randomBase64UUID()));
}
mdBuilder.indexGraveyard(graveyard.build());
return mdBuilder.build();
}
private IndexMetaData.Builder indexBuilder(String index) throws IOException {
return IndexMetaData.builder(index)
.settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10))
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)));
}
private static class Format extends MetaDataStateFormat<DummyState> { private static class Format extends MetaDataStateFormat<DummyState> {
private enum FailureMode { private enum FailureMode {
NO_FAILURES, NO_FAILURES,
@ -716,13 +568,4 @@ public class MetaDataStateFormatTests extends ESTestCase {
protected final NamedXContentRegistry xContentRegistry() { protected final NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); return new NamedXContentRegistry(ClusterModule.getNamedXWriteables());
} }
/**
* The {@link NamedXContentRegistry} to use for {@link XContentParser}s that should be
* missing all of the normal cluster state parsers.
*/
protected NamedXContentRegistry xContentRegistryWithMissingCustoms() {
return new NamedXContentRegistry(Arrays.asList(
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("garbage"), RepositoriesMetaData::fromXContent)));
}
} }

View File

@ -230,8 +230,8 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
final String message = expectThrows(IllegalStateException.class, final String message = expectThrows(IllegalStateException.class,
() -> new PersistedClusterStateService(Stream.of(combinedPaths).map(path -> NodeEnvironment.resolveNodePath(path, 0)) () -> new PersistedClusterStateService(Stream.of(combinedPaths).map(path -> NodeEnvironment.resolveNodePath(path, 0))
.toArray(Path[]::new), nodeIds[0], xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, .toArray(Path[]::new), nodeIds[0], xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L
randomBoolean()).loadBestOnDiskState()).getMessage(); ).loadBestOnDiskState()).getMessage();
assertThat(message, assertThat(message,
allOf(containsString("unexpected node ID in metadata"), containsString(nodeIds[0]), containsString(nodeIds[1]))); allOf(containsString("unexpected node ID in metadata"), containsString(nodeIds[0]), containsString(nodeIds[1])));
assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2),

View File

@ -129,7 +129,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(dataPaths, nodeId, try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(dataPaths, nodeId,
xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true).createWriter()) { new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L).createWriter()) {
writer.writeFullStateAndCommit(1L, clusterState); writer.writeFullStateAndCommit(1L, clusterState);
} }
} }

View File

@ -68,6 +68,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress;
import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand;
import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -83,6 +84,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkAddress;
@ -98,8 +100,11 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ElectMasterService;
@ -571,6 +576,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
} }
ensureClusterSizeConsistency(); ensureClusterSizeConsistency();
ensureClusterStateConsistency(); ensureClusterStateConsistency();
ensureClusterStateCanBeReadByNodeTool();
if (isInternalCluster()) { if (isInternalCluster()) {
// check no pending cluster states are leaked // check no pending cluster states are leaked
for (Discovery discovery : internalCluster().getInstances(Discovery.class)) { for (Discovery discovery : internalCluster().getInstances(Discovery.class)) {
@ -1094,6 +1100,27 @@ public abstract class ESIntegTestCase extends ESTestCase {
} }
protected void ensureClusterStateCanBeReadByNodeTool() throws IOException {
if (cluster() != null && cluster().size() > 0) {
final Client masterClient = client();
MetaData metaData = masterClient.admin().cluster().prepareState().all().get().getState().metaData();
final XContentBuilder builder = JsonXContent.contentBuilder();
builder.startObject();
metaData.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
final MetaData loadedMetaData;
try (XContentParser parser = createParser(ElasticsearchNodeCommand.namedXContentRegistry,
JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
loadedMetaData = MetaData.fromXContent(parser);
}
assertNull(
"cluster state JSON serialization does not match",
differenceBetweenMapsIgnoringArrayOrder(convertToMap(metaData), convertToMap(loadedMetaData)));
}
}
/** /**
* Tests if the client is a transport client or wraps a transport client. * Tests if the client is a transport client or wraps a transport client.
* *

View File

@ -1273,6 +1273,14 @@ public abstract class ESTestCase extends LuceneTestCase {
return xContent.createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, data.streamInput()); return xContent.createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, data.streamInput());
} }
/**
* Create a new {@link XContentParser}.
*/
protected final XContentParser createParser(NamedXContentRegistry namedXContentRegistry, XContent xContent,
BytesReference data) throws IOException {
return xContent.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, data.streamInput());
}
/** /**
* The {@link NamedXContentRegistry} to use for this test. Subclasses should override and use liberally. * The {@link NamedXContentRegistry} to use for this test. Subclasses should override and use liberally.
*/ */

View File

@ -80,7 +80,7 @@ public class LicensesMetaDataSerializationTests extends ESTestCase {
builder = metaDataBuilder.build().toXContent(builder, params); builder = metaDataBuilder.build().toXContent(builder, params);
builder.endObject(); builder.endObject();
// deserialize metadata again // deserialize metadata again
MetaData metaData = MetaData.Builder.fromXContent(createParser(builder), randomBoolean()); MetaData metaData = MetaData.Builder.fromXContent(createParser(builder));
// check that custom metadata still present // check that custom metadata still present
assertThat(metaData.custom(licensesMetaData.getWriteableName()), notNullValue()); assertThat(metaData.custom(licensesMetaData.getWriteableName()), notNullValue());
assertThat(metaData.custom(repositoriesMetaData.getWriteableName()), notNullValue()); assertThat(metaData.custom(repositoriesMetaData.getWriteableName()), notNullValue());

View File

@ -64,7 +64,7 @@ public class WatcherMetaDataSerializationTests extends ESTestCase {
builder = metaDataBuilder.build().toXContent(builder, params); builder = metaDataBuilder.build().toXContent(builder, params);
builder.endObject(); builder.endObject();
// deserialize metadata again // deserialize metadata again
MetaData metaData = MetaData.Builder.fromXContent(createParser(builder), randomBoolean()); MetaData metaData = MetaData.Builder.fromXContent(createParser(builder));
// check that custom metadata still present // check that custom metadata still present
assertThat(metaData.custom(watcherMetaData.getWriteableName()), notNullValue()); assertThat(metaData.custom(watcherMetaData.getWriteableName()), notNullValue());
assertThat(metaData.custom(repositoriesMetaData.getWriteableName()), notNullValue()); assertThat(metaData.custom(repositoriesMetaData.getWriteableName()), notNullValue());