Atomic mapping updates across types (#22220)

This commit makes mapping updates atomic when multiple types in an index are updated. Mappings for an index are now applied in a single atomic operation, which also makes it possible to optimize some of the cross-type updates and checks.
Yannick Welsch authored on 2016-12-19 14:39:50 +01:00, committed by GitHub
parent 1cabf66bd5
commit 63af03a104
17 changed files with 256 additions and 224 deletions
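
At the call sites, the change collapses the old per-type merge loop into a single multi-type call. Both shapes appear verbatim in the hunks below (MAPPING_RECOVERY shown; the same applies to MAPPING_UPDATE):

    // before: each type is merged and validated on its own, so a failure
    // part-way through the loop can leave some types applied and others not
    for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
        MappingMetaData mappingMetaData = cursor.value;
        mapperService.merge(mappingMetaData.type(), mappingMetaData.source(),
                MapperService.MergeReason.MAPPING_RECOVERY, false);
    }

    // after: one call that applies all types of the index atomically
    mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);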

View File

@@ -67,6 +67,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
@@ -356,10 +357,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// now add the mappings
MapperService mapperService = indexService.mapperService();
try {
mapperService.merge(mappings, request.updateAllTypes());
} catch (MapperParsingException mpe) {
mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
throw mpe;
throw e;
}
// the context is only used for validation so it's fine to pass fake values for the shard id and the current

View File

@@ -141,15 +141,11 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
// temporarily create the index and add mappings so we can parse the filter
try {
indexService = indicesService.createIndex(index, emptyList(), shardId -> {});
indicesToClose.add(index.getIndex());
} catch (IOException e) {
throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
}
for (ObjectCursor<MappingMetaData> cursor : index.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(),
MapperService.MergeReason.MAPPING_RECOVERY, false);
}
indicesToClose.add(index.getIndex());
indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false);
}
indices.put(action.getIndex(), indexService);
}

View File

@@ -39,6 +39,7 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -222,7 +223,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(entry.getValue()));
}
dummyIndexService.mapperService().merge(mappingsForValidation, false);
dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false);
} finally {
if (createdIndex != null) {

View File

@@ -147,10 +147,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
};
try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap)) {
MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, similarityService, mapperRegistry, () -> null);
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
}
} catch (Exception ex) {
// Wrap the inner exception so we have the index name in the exception message

View File

@@ -43,6 +43,7 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
@@ -146,10 +147,7 @@ public class MetaDataMappingService extends AbstractComponent {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {});
removeIndex = true;
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
}
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
@@ -226,10 +224,7 @@
MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
indexMapperServices.put(index, mapperService);
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
mapperService.merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
}
currentState = applyRequest(currentState, request, indexMapperServices);
@@ -313,7 +308,7 @@
if (existingMapper != null) {
existingSource = existingMapper.mappingSource();
}
DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
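
The "add mappings for all types, we need them for cross-type validation" step above is the reason the whole index's mappings are loaded before one type's update is applied: a field used by several types must keep a compatible definition, and that can only be checked when every existing type is present. A self-contained sketch of that idea, using hypothetical names and a deliberately simplified field model (not the Elasticsearch implementation):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch: a field that already exists in another type must keep
    // a compatible definition, so the checker has to see every existing type.
    class CrossTypeValidationSketch {
        static void validate(Map<String, Map<String, String>> existingTypes,
                             String type, Map<String, String> updatedFields) {
            for (Map.Entry<String, Map<String, String>> other : existingTypes.entrySet()) {
                if (other.getKey().equals(type)) {
                    continue; // a type is allowed to restate its own fields
                }
                for (Map.Entry<String, String> field : updatedFields.entrySet()) {
                    String existing = other.getValue().get(field.getKey());
                    if (existing != null && existing.equals(field.getValue()) == false) {
                        throw new IllegalArgumentException("mapper [" + field.getKey()
                                + "] conflicts with the definition in type [" + other.getKey() + "]");
                    }
                }
            }
        }

        public static void main(String[] args) {
            Map<String, Map<String, String>> types = new HashMap<>();
            types.put("type1", Collections.singletonMap("text", "text"));
            // with type1 loaded this throws; without it the conflict would go unnoticed
            validate(types, "type2", Collections.singletonMap("text", "keyword"));
        }
    }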

View File

@@ -28,6 +28,7 @@ import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
@@ -51,6 +52,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -61,7 +63,6 @@ import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
public class MapperService extends AbstractIndexComponent implements Closeable {
@@ -191,101 +192,158 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}
/**
* Update mapping by only merging the metadata that is different between received and stored entries
*/
public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex();
// go over and add the relevant mappings (or update them)
final Set<String> existingMappers = new HashSet<>(mappers.keySet());
final Map<String, DocumentMapper> updatedEntries;
try {
// only update entries if needed
updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true);
} catch (Exception e) {
logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
throw e;
}
boolean requireRefresh = false;
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMd = cursor.value;
String mappingType = mappingMd.type();
CompressedXContent mappingSource = mappingMd.source();
for (DocumentMapper documentMapper : updatedEntries.values()) {
String mappingType = documentMapper.type();
CompressedXContent incomingMappingSource = indexMetaData.mapping(mappingType).source();
String op = existingMappers.contains(mappingType) ? "updated" : "added";
if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {
logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
} else if (logger.isTraceEnabled()) {
logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
} else {
logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op, mappingType);
}
// refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
// mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
// merge version of it, which it does when refreshing the mappings), and warn log it.
try {
DocumentMapper existingMapper = documentMapper(mappingType);
if (documentMapper(mappingType).mappingSource().equals(incomingMappingSource) == false) {
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(), mappingType,
incomingMappingSource, documentMapper(mappingType).mappingSource());
if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) {
String op = existingMapper == null ? "adding" : "updating";
if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string());
} else if (logger.isTraceEnabled()) {
logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string());
} else {
logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op,
mappingType);
}
merge(mappingType, mappingSource, MergeReason.MAPPING_RECOVERY, true);
if (!documentMapper(mappingType).mappingSource().equals(mappingSource)) {
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(),
mappingType, mappingSource, documentMapper(mappingType).mappingSource());
requireRefresh = true;
}
}
} catch (Exception e) {
logger.warn(
(org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("[{}] failed to add mapping [{}], source [{}]", index(), mappingType, mappingSource),
e);
throw e;
}
}
return requireRefresh;
}
//TODO: make this atomic
public void merge(Map<String, Map<String, Object>> mappings, boolean updateAllTypes) throws MapperParsingException {
// first, add the default mapping
if (mappings.containsKey(DEFAULT_MAPPING)) {
try {
this.merge(DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(DEFAULT_MAPPING)).string()), MergeReason.MAPPING_UPDATE, updateAllTypes);
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, DEFAULT_MAPPING, e.getMessage());
}
}
public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason, boolean updateAllTypes) {
Map<String, CompressedXContent> mappingSourcesCompressed = new LinkedHashMap<>(mappings.size());
for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
if (entry.getKey().equals(DEFAULT_MAPPING)) {
continue;
}
try {
// apply the default here, its the first time we parse it
this.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), MergeReason.MAPPING_UPDATE, updateAllTypes);
mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()));
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
}
}
internalMerge(mappingSourcesCompressed, reason, updateAllTypes);
}
public void merge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes) {
internalMerge(indexMetaData, reason, updateAllTypes, false);
}
public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
if (DEFAULT_MAPPING.equals(type)) {
return internalMerge(Collections.singletonMap(type, mappingSource), reason, updateAllTypes).get(type);
}
private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes,
boolean onlyUpdateIfNeeded) {
Map<String, CompressedXContent> map = new LinkedHashMap<>();
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
if (onlyUpdateIfNeeded) {
DocumentMapper existingMapper = documentMapper(mappingMetaData.type());
if (existingMapper == null || mappingMetaData.source().equals(existingMapper.mappingSource()) == false) {
map.put(mappingMetaData.type(), mappingMetaData.source());
}
} else {
map.put(mappingMetaData.type(), mappingMetaData.source());
}
}
return internalMerge(map, reason, updateAllTypes);
}
private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason, boolean updateAllTypes) {
DocumentMapper defaultMapper = null;
String defaultMappingSource = null;
if (mappings.containsKey(DEFAULT_MAPPING)) {
// verify we can parse it
// NOTE: never apply the default here
DocumentMapper mapper = documentParser.parse(type, mappingSource);
// still add it as a document mapper so we have it registered and, for example, persisted back into
// the cluster meta data if needed, or checked for existence
synchronized (this) {
mappers = newMapBuilder(mappers).put(type, mapper).map();
try {
defaultMapper = documentParser.parse(DEFAULT_MAPPING, mappings.get(DEFAULT_MAPPING));
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, DEFAULT_MAPPING, e.getMessage());
}
try {
defaultMappingSource = mappingSource.string();
defaultMappingSource = mappings.get(DEFAULT_MAPPING).string();
} catch (IOException e) {
throw new ElasticsearchGenerationException("failed to un-compress", e);
}
return mapper;
}
final String defaultMappingSourceOrLastStored;
if (defaultMappingSource != null) {
defaultMappingSourceOrLastStored = defaultMappingSource;
} else {
synchronized (this) {
defaultMappingSourceOrLastStored = this.defaultMappingSource;
}
List<DocumentMapper> documentMappers = new ArrayList<>();
for (Map.Entry<String, CompressedXContent> entry : mappings.entrySet()) {
String type = entry.getKey();
if (type.equals(DEFAULT_MAPPING)) {
continue;
}
final boolean applyDefault =
// the default was already applied if we are recovering
reason != MergeReason.MAPPING_RECOVERY
// only apply the default mapping if we don't have the type yet
&& mappers.containsKey(type) == false;
DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
return merge(mergeWith, reason, updateAllTypes);
}
try {
DocumentMapper documentMapper = documentParser.parse(type, entry.getValue(), applyDefault ? defaultMappingSourceOrLastStored : null);
documentMappers.add(documentMapper);
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
}
}
private synchronized DocumentMapper merge(DocumentMapper mapper, MergeReason reason, boolean updateAllTypes) {
return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason, updateAllTypes);
}
private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
List<DocumentMapper> documentMappers, MergeReason reason, boolean updateAllTypes) {
boolean hasNested = this.hasNested;
boolean allEnabled = this.allEnabled;
Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
FieldTypeLookup fieldTypes = this.fieldTypes;
Set<String> parentTypes = this.parentTypes;
Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
Map<String, DocumentMapper> results = new LinkedHashMap<>(documentMappers.size() + 1);
if (defaultMapper != null) {
assert defaultMapper.type().equals(DEFAULT_MAPPING);
mappers.put(DEFAULT_MAPPING, defaultMapper);
results.put(DEFAULT_MAPPING, defaultMapper);
}
for (DocumentMapper mapper : documentMappers) {
// check naming
if (mapper.type().length() == 0) {
throw new InvalidTypeNameException("mapping type name is empty");
}
@@ -308,7 +366,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
}
// 1. compute the merged DocumentMapper
// compute the merged DocumentMapper
DocumentMapper oldMapper = mappers.get(mapper.type());
DocumentMapper newMapper;
if (oldMapper != null) {
@@ -317,27 +375,52 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
newMapper = mapper;
}
// 2. check basic sanity of the new mapping
// check basic sanity of the new mapping
List<ObjectMapper> objectMappers = new ArrayList<>();
List<FieldMapper> fieldMappers = new ArrayList<>();
Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers);
checkObjectsCompatibility(objectMappers, updateAllTypes);
checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers, fullPathObjectMappers, fieldTypes);
checkObjectsCompatibility(objectMappers, updateAllTypes, fullPathObjectMappers);
// 3. update lookup data-structures
// update lookup data-structures
// this will in particular make sure that the merged fields are compatible with other types
FieldTypeLookup fieldTypes = this.fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);
fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);
boolean hasNested = this.hasNested;
Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
for (ObjectMapper objectMapper : objectMappers) {
if (fullPathObjectMappers == this.fullPathObjectMappers) {
fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
}
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) {
hasNested = true;
}
}
fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
if (reason == MergeReason.MAPPING_UPDATE) {
// this check will only be performed on the master node when there is
// a call to the update mapping API. For all other cases like
// the master node restoring mappings from disk or data nodes
// deserializing cluster state that was sent by the master node,
// this check will be skipped.
checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
}
if (oldMapper == null && newMapper.parentFieldMapper().active()) {
if (parentTypes == this.parentTypes) {
parentTypes = new HashSet<>(this.parentTypes);
}
parentTypes.add(mapper.parentFieldMapper().type());
}
// this is only correct because types cannot be removed and we do not
// allow to disable an existing _all field
allEnabled |= mapper.allFieldMapper().enabled();
results.put(newMapper.type(), newMapper);
mappers.put(newMapper.type(), newMapper);
}
if (reason == MergeReason.MAPPING_UPDATE) {
// this check will only be performed on the master node when there is
@@ -346,45 +429,46 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
// deserializing cluster state that was sent by the master node,
// this check will be skipped.
checkNestedFieldsLimit(fullPathObjectMappers);
checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
checkDepthLimit(fullPathObjectMappers.keySet());
}
Set<String> parentTypes = this.parentTypes;
if (oldMapper == null && newMapper.parentFieldMapper().active()) {
parentTypes = new HashSet<>(parentTypes.size() + 1);
parentTypes.addAll(this.parentTypes);
parentTypes.add(mapper.parentFieldMapper().type());
parentTypes = Collections.unmodifiableSet(parentTypes);
}
Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
mappers.put(newMapper.type(), newMapper);
for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
if (entry.getKey().equals(DEFAULT_MAPPING)) {
continue;
}
DocumentMapper m = entry.getValue();
DocumentMapper documentMapper = entry.getValue();
// apply changes to the field types back
m = m.updateFieldType(fieldTypes.fullNameToFieldType);
entry.setValue(m);
DocumentMapper updatedDocumentMapper = documentMapper.updateFieldType(fieldTypes.fullNameToFieldType);
if (updatedDocumentMapper != documentMapper) {
// update both mappers and result
entry.setValue(updatedDocumentMapper);
if (results.containsKey(updatedDocumentMapper.type())) {
results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
}
}
}
mappers = Collections.unmodifiableMap(mappers);
// 4. commit the change
// make structures immutable
mappers = Collections.unmodifiableMap(mappers);
results = Collections.unmodifiableMap(results);
parentTypes = Collections.unmodifiableSet(parentTypes);
fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
// commit the change
if (defaultMappingSource != null) {
this.defaultMappingSource = defaultMappingSource;
}
this.mappers = mappers;
this.fieldTypes = fieldTypes;
this.hasNested = hasNested;
this.fullPathObjectMappers = fullPathObjectMappers;
this.parentTypes = parentTypes;
// this is only correct because types cannot be removed and we do not
// allow to disable an existing _all field
this.allEnabled |= mapper.allFieldMapper().enabled();
this.allEnabled = allEnabled;
assert assertSerialization(newMapper);
assert assertMappersShareSameFieldType();
assert results.values().stream().allMatch(this::assertSerialization);
return newMapper;
return results;
}
private boolean assertMappersShareSameFieldType() {
@@ -421,8 +505,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
return true;
}
private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
assert Thread.holdsLock(this);
private static void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers,
Map<String, ObjectMapper> fullPathObjectMappers, FieldTypeLookup fieldTypes) {
// first check within mapping
final Set<String> objectFullNames = new HashSet<>();
@@ -459,9 +543,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}
private void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes) {
assert Thread.holdsLock(this);
private static void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes,
Map<String, ObjectMapper> fullPathObjectMappers) {
for (ObjectMapper newObjectMapper : objectMappers) {
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
if (existingObjectMapper != null) {
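
The heart of the change is the rewritten internalMerge above: it takes local copies of every lookup structure (mappers, fieldTypes, fullPathObjectMappers, parentTypes, hasNested, allEnabled), merges and validates all incoming types against those copies, and only assigns the fields back once everything has passed. A minimal, self-contained sketch of that copy-on-write shape, with illustrative names rather than the real classes:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative copy-on-write registry: mutate local copies, validate, then
    // publish everything in one step so readers never see a half-applied batch.
    class AtomicMappingRegistry {
        // published state; only ever replaced wholesale, never mutated in place
        private volatile Map<String, String> mappings = Collections.emptyMap();

        synchronized Map<String, String> mergeAll(Map<String, String> incoming) {
            // 1. work on a local copy; the published map stays untouched
            Map<String, String> updated = new HashMap<>(this.mappings);
            Map<String, String> results = new LinkedHashMap<>();
            for (Map.Entry<String, String> entry : incoming.entrySet()) {
                // 2. validate before applying; an exception aborts the whole batch
                if (entry.getKey().isEmpty()) {
                    throw new IllegalArgumentException("mapping type name is empty");
                }
                updated.put(entry.getKey(), entry.getValue());
                results.put(entry.getKey(), entry.getValue());
            }
            // 3. commit: one reference swap makes all entries visible together
            this.mappings = Collections.unmodifiableMap(updated);
            return Collections.unmodifiableMap(results);
        }
    }

This is also why checkFieldUniqueness and checkObjectsCompatibility became static and take fullPathObjectMappers and fieldTypes as parameters: they now validate the in-progress local copies rather than the already-published instance fields (the old versions asserted Thread.holdsLock(this) instead).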

View File

@@ -26,8 +26,7 @@ import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.NoLockFactory;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.store.Store;
@@ -123,8 +122,8 @@ final class LocalShardSnapshot implements Closeable {
}
}
ImmutableOpenMap<String, MappingMetaData> getMappings() {
return shard.indexSettings.getIndexMetaData().getMappings();
IndexMetaData getIndexMetaData() {
return shard.indexSettings.getIndexMetaData();
}
@Override

View File

@@ -104,12 +104,11 @@ final class StoreRecovery {
if (indices.size() > 1) {
throw new IllegalArgumentException("can't add shards from more than one index");
}
for (ObjectObjectCursor<String, MappingMetaData> mapping : shards.get(0).getMappings()) {
IndexMetaData indexMetaData = shards.get(0).getIndexMetaData();
for (ObjectObjectCursor<String, MappingMetaData> mapping : indexMetaData.getMappings()) {
mappingUpdateConsumer.accept(mapping.key, mapping.value);
}
for (ObjectObjectCursor<String, MappingMetaData> mapping : shards.get(0).getMappings()) {
indexShard.mapperService().merge(mapping.key, mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
indexShard.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
return executeRecovery(indexShard, () -> {
logger.debug("starting recovery from local shards {}", shards);
try {

View File

@@ -485,11 +485,7 @@ public class IndicesService extends AbstractLifecycleComponent
final IndexService service =
createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList(), s -> {});
closeables.add(() -> service.close("metadata verification", false));
for (ObjectCursor<MappingMetaData> typeMapping : metaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
service.mapperService().merge(typeMapping.value.type(), typeMapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, true);
}
service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
if (metaData.equals(metaDataUpdate) == false) {
service.updateMetaData(metaDataUpdate);
}

View File

@@ -39,7 +39,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.test.ESIntegTestCase;
@@ -277,15 +276,8 @@ public class CreateIndexIT extends ESIntegTestCase {
.startObject("text")
.field("type", "text")
.endObject().endObject().endObject());
try {
b.get();
} catch (MapperParsingException e) {
StringBuilder messages = new StringBuilder();
for (Exception rootCause: e.guessRootCauses()) {
messages.append(rootCause.getMessage());
}
assertThat(messages.toString(), containsString("mapper [text] is used by multiple types"));
}
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get());
assertThat(e.getMessage(), containsString("mapper [text] is used by multiple types"));
}
public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {

View File

@@ -58,6 +58,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
@@ -491,7 +492,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
assertEquals(ex.getMessage(), "Failed to verify index " + metaData.getIndex());
assertNotNull(ex.getCause());
assertEquals(MapperParsingException.class, ex.getCause().getClass());
assertEquals(ex.getCause().getMessage(), "analyzer [test] not found for field [field1]");
assertThat(ex.getCause().getMessage(), containsString("analyzer [test] not found for field [field1]"));
}
public void testArchiveBrokenClusterSettings() throws Exception {

View File

@@ -22,27 +22,17 @@ import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.Collection;
import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;

View File

@@ -49,7 +49,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
String index = "test-index";
String type = ".test-type";
String field = "field";
MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
client().admin().indices().prepareCreate(index)
.addMapping(type, field, "type=text")
.execute().actionGet();
@@ -62,7 +62,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
String field = "field";
String type = new String(new char[256]).replace("\0", "a");
MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
MapperException e = expectThrows(MapperException.class, () -> {
client().admin().indices().prepareCreate(index)
.addMapping(type, field, "type=text")
.execute().actionGet();
@@ -175,14 +175,14 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
mappings.put(MapperService.DEFAULT_MAPPING, MapperService.parseMapping("{}"));
MapperException e = expectThrows(MapperParsingException.class,
() -> mapperService.merge(mappings, false));
() -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false));
assertThat(e.getMessage(), startsWith("Failed to parse mapping [" + MapperService.DEFAULT_MAPPING + "]: "));
mappings.clear();
mappings.put("type1", MapperService.parseMapping("{}"));
e = expectThrows(MapperParsingException.class,
() -> mapperService.merge(mappings, false));
() -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false));
assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: "));
}

View File

@@ -19,17 +19,11 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -37,9 +31,7 @@ import org.elasticsearch.test.InternalSettingsPlugin;
import java.io.IOException;
import java.util.Collection;
import java.util.LinkedHashMap;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;

View File

@@ -111,14 +111,9 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
}
public void testSelfReferentialIsForbidden() {
try {
prepareCreate("test").addMapping("type", "_parent", "type=type").get();
fail("self referential should be forbidden");
} catch (Exception e) {
Throwable cause = e.getCause();
assertThat(cause, instanceOf(IllegalArgumentException.class));
assertThat(cause.getMessage(), equalTo("The [_parent.type] option can't point to the same type"));
}
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
prepareCreate("test").addMapping("type", "_parent", "type=type").get());
assertThat(e.getMessage(), equalTo("The [_parent.type] option can't point to the same type"));
}
public void testMultiLevelChild() throws Exception {

View File

@@ -332,12 +332,9 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
.startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject()
.endObject().endObject().string();
try {
mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
fail("MapperParsingException expected");
} catch (MapperParsingException e) {
assertThat(e.getMessage(), equalTo("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
}
MapperParsingException e = expectThrows(MapperParsingException.class, () ->
mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true));
assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
}
// multiple percolator fields are allowed in the mapping, but only one field can be used at index time.

View File

@@ -263,9 +263,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
try {
IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), indexSettings.getSettings());
for (ObjectObjectCursor<String, MappingMetaData> typeMapping : indexMetaData.getMappings()) {
mapperService.merge(typeMapping.key, typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
final IndexEventListener indexEventListener = new IndexEventListener() {
};