MetaDataMappingService should call MapperService.merge with the original mapping update.

Currently MetaDataMappingService parses the mapping update, re-serializes it and
finally calls MapperService.merge with the serialized mapping. Given that mapping
serialization only writes differences from the defaults, this round-trip loses
information: parsers cannot tell whether an option was explicitly set or merely
left at its default value.
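
As a toy illustration of the problem (made-up option names and a made-up
serializer, not the actual Elasticsearch code):

import java.util.LinkedHashMap;
import java.util.Map;

// Toy model: serialization writes only non-default options, so a round-trip
// cannot distinguish "explicitly set to the default value" from "never set".
public class DefaultTrimmingDemo {

    // Serializes a field's options, omitting any value that equals its default,
    // the way mapping serialization only writes differences from the default.
    static Map<String, String> serialize(Map<String, String> options) {
        Map<String, String> out = new LinkedHashMap<>(options);
        out.remove("index", "analyzed"); // "analyzed" is the (made-up) default
        return out;
    }

    public static void main(String[] args) {
        Map<String, String> update = new LinkedHashMap<>();
        update.put("type", "string");
        update.put("index", "analyzed"); // explicitly set, happens to equal the default

        // Prints {type=string}: the explicit "index" option is gone, so a parser
        // of the re-serialized mapping cannot know it was ever set.
        System.out.println(serialize(update));
    }
}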

Furthermore, this can cause bugs with metadata fields, given that these fields use
existing field types as their defaults: an option trimmed at serialization time
because it matched the defaults may be re-interpreted against different defaults
when the trimmed mapping is parsed again.

This commit changes MetaDataMappingService to call MapperService.merge with the
original mapping update.
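
In essence the call flow changes like this (simplified excerpts of the lines
changed below, not compilable on their own):

// Before: the update was parsed into a DocumentMapper and merge() was handed the
// re-serialized, defaults-trimmed source of that mapper:
DocumentMapper newMapper = mapperService.parse(type, new CompressedXContent(request.source()), existingMapper == null);
mapperService.merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());

// After: merge() receives the original bytes of the update; the default mapping
// is applied inside merge(), and only if the type does not exist yet:
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
mapperService.merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());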
Adrien Grand 2015-12-17 13:03:08 +01:00
parent 6ccc759691
commit 86d6e28b7f
2 changed files with 72 additions and 74 deletions

MetaDataMappingService.java

@@ -236,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent {
     }
 
     private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
-        Map<String, DocumentMapper> newMappers = new HashMap<>();
-        Map<String, DocumentMapper> existingMappers = new HashMap<>();
+        String mappingType = request.type();
+        CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
         for (String index : request.indices()) {
             IndexService indexService = indicesService.indexServiceSafe(index);
             // try and parse it (no need to add it here) so we can bail early in case of parsing exception
@@ -245,9 +245,9 @@ public class MetaDataMappingService extends AbstractComponent {
             DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
             if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                 // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
-                newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
+                newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false);
             } else {
-                newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
+                newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
                 if (existingMapper != null) {
                     // first, simulate
                     // this will just throw exceptions in case of problems
@@ -270,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent {
                     }
                 }
             }
-            newMappers.put(index, newMapper);
-            if (existingMapper != null) {
-                existingMappers.put(index, existingMapper);
+            if (mappingType == null) {
+                mappingType = newMapper.type();
+            } else if (mappingType.equals(newMapper.type()) == false) {
+                throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
             }
         }
+        assert mappingType != null;
 
-        String mappingType = request.type();
-        if (mappingType == null) {
-            mappingType = newMappers.values().iterator().next().type();
-        } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
-            throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
-        }
         if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
             throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
         }
         final Map<String, MappingMetaData> mappings = new HashMap<>();
-        for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
-            String index = entry.getKey();
+        for (String index : request.indices()) {
             // do the actual merge here on the master, and update the mapping source
-            DocumentMapper newMapper = entry.getValue();
             IndexService indexService = indicesService.indexService(index);
             if (indexService == null) {
                 continue;
             }
 
             CompressedXContent existingSource = null;
-            if (existingMappers.containsKey(entry.getKey())) {
-                existingSource = existingMappers.get(entry.getKey()).mappingSource();
+            DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
+            if (existingMapper != null) {
+                existingSource = existingMapper.mappingSource();
             }
-            DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
+            DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
             CompressedXContent updatedSource = mergedMapper.mappingSource();
 
             if (existingSource != null) {
@@ -318,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent {
             } else {
                 mappings.put(index, new MappingMetaData(mergedMapper));
                 if (logger.isDebugEnabled()) {
-                    logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
+                    logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
                 } else if (logger.isInfoEnabled()) {
-                    logger.info("[{}] create_mapping [{}]", index, newMapper.type());
+                    logger.info("[{}] create_mapping [{}]", index, mappingType);
                 }
             }
         }

MapperService.java

@@ -198,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
         if (DEFAULT_MAPPING.equals(type)) {
             // verify we can parse it
+            // NOTE: never apply the default here
             DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
             // still add it as a document mapper so we have it registered and, for example, persisted back into
             // the cluster meta data if needed, or checked for existence
@@ -211,68 +212,70 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             }
             return mapper;
         } else {
-            return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
+            try (ReleasableLock lock = mappingWriteLock.acquire()) {
+                // only apply the default mapping if we don't have the type yet
+                applyDefault &= mappers.containsKey(type) == false;
+                return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
+            }
         }
     }
 
     // never expose this to the outside world, we need to reparse the doc mapper so we get fresh
     // instances of field mappers to properly remove existing doc mapper
     private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) {
-        try (ReleasableLock lock = mappingWriteLock.acquire()) {
-            if (mapper.type().length() == 0) {
-                throw new InvalidTypeNameException("mapping type name is empty");
-            }
-            if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
-            }
-            if (mapper.type().charAt(0) == '_') {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
-            }
-            if (mapper.type().contains("#")) {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
-            }
-            if (mapper.type().contains(",")) {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
-            }
-            if (mapper.type().equals(mapper.parentFieldMapper().type())) {
-                throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
-            }
-            if (typeNameStartsWithIllegalDot(mapper)) {
-                if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
-                    throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
-                } else {
-                    logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
-                }
-            }
-            // we can add new field/object mappers while the old ones are there
-            // since we get new instances of those, and when we remove, we remove
-            // by instance equality
-            DocumentMapper oldMapper = mappers.get(mapper.type());
-            if (oldMapper != null) {
-                oldMapper.merge(mapper.mapping(), false, updateAllTypes);
-                return oldMapper;
+        if (mapper.type().length() == 0) {
+            throw new InvalidTypeNameException("mapping type name is empty");
+        }
+        if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
+            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
+        }
+        if (mapper.type().charAt(0) == '_') {
+            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
+        }
+        if (mapper.type().contains("#")) {
+            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
+        }
+        if (mapper.type().contains(",")) {
+            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
+        }
+        if (mapper.type().equals(mapper.parentFieldMapper().type())) {
+            throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
+        }
+        if (typeNameStartsWithIllegalDot(mapper)) {
+            if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
+                throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
             } else {
-                Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
-                        mapper.type(), mapper.mapping(), updateAllTypes);
-                Collection<ObjectMapper> newObjectMappers = newMappers.v1();
-                Collection<FieldMapper> newFieldMappers = newMappers.v2();
-                addMappers(mapper.type(), newObjectMappers, newFieldMappers);
-                for (DocumentTypeListener typeListener : typeListeners) {
-                    typeListener.beforeCreate(mapper);
-                }
-                mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
-                if (mapper.parentFieldMapper().active()) {
-                    Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
-                    newParentTypes.addAll(parentTypes);
-                    newParentTypes.add(mapper.parentFieldMapper().type());
-                    parentTypes = unmodifiableSet(newParentTypes);
-                }
-                assert assertSerialization(mapper);
-                return mapper;
+                logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
             }
         }
+        // we can add new field/object mappers while the old ones are there
+        // since we get new instances of those, and when we remove, we remove
+        // by instance equality
+        DocumentMapper oldMapper = mappers.get(mapper.type());
+        if (oldMapper != null) {
+            oldMapper.merge(mapper.mapping(), false, updateAllTypes);
+            return oldMapper;
+        } else {
+            Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
+                    mapper.type(), mapper.mapping(), updateAllTypes);
+            Collection<ObjectMapper> newObjectMappers = newMappers.v1();
+            Collection<FieldMapper> newFieldMappers = newMappers.v2();
+            addMappers(mapper.type(), newObjectMappers, newFieldMappers);
+            for (DocumentTypeListener typeListener : typeListeners) {
+                typeListener.beforeCreate(mapper);
+            }
+            mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
+            if (mapper.parentFieldMapper().active()) {
+                Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
+                newParentTypes.addAll(parentTypes);
+                newParentTypes.add(mapper.parentFieldMapper().type());
+                parentTypes = unmodifiableSet(newParentTypes);
+            }
+            assert assertSerialization(mapper);
+            return mapper;
+        }
+    }
 
     private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
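
One subtlety in the MapperService hunk above: whether to apply the default mapping
now depends on whether the type already exists, so the existence check and the
merge must happen under the same write lock, which is why the lock moved from the
private merge into the public one. A self-contained toy sketch of that
check-then-act pattern (toy types, not the actual Elasticsearch classes):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

// Toy illustration of the locking pattern: the decision "apply defaults only if
// the type is new" and the merge itself happen under one lock, so no other
// thread can register the type between the containsKey check and the merge.
public class ToyMapperService {
    private final Map<String, String> mappers = new ConcurrentHashMap<>();
    private final ReentrantLock mappingWriteLock = new ReentrantLock();

    String merge(String type, String mappingSource, boolean applyDefault) {
        mappingWriteLock.lock();
        try {
            // only apply the default mapping if we don't have the type yet
            applyDefault &= mappers.containsKey(type) == false;
            String parsed = applyDefault ? "defaults+" + mappingSource : mappingSource;
            return mappers.merge(type, parsed, (old, update) -> old + "|" + update);
        } finally {
            mappingWriteLock.unlock();
        }
    }

    public static void main(String[] args) {
        ToyMapperService service = new ToyMapperService();
        System.out.println(service.merge("type1", "m1", true)); // defaults+m1 (new type)
        System.out.println(service.merge("type1", "m2", true)); // defaults+m1|m2 (defaults not re-applied)
    }
}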