Only update DocumentMapper if field type changes (#22165)

Merging mappings ensures that fields are used consistently across the mapping types of an index. Disabling norms for a specific field in one mapping type, for example, also disables norms for the same field in all other mapping types of that index. The logic that enforces this while merging mappings currently always creates a fresh document mapper for every existing mapping type, even if no change occurred. Creating such a fresh document mapper does not come for free, though, as it involves re-serializing and recompressing the mapping source. Making a mapping change to one type of an index with 100 types therefore re-serializes and recompresses all 100 types, regardless of whether those types were actually affected.

This commit fixes the update logic to only create a new DocumentMapper if a field type actually changes.
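The change boils down to a copy-on-write style short circuit: an update method returns the exact same instance when nothing changed, so callers can use reference equality to detect a no-op and keep the existing, already compressed document mapper. The following is a minimal, self-contained sketch of that pattern; the class and field names are made up for illustration and are not the Elasticsearch types.

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the "return this when unchanged" pattern (hypothetical
// names, not the Elasticsearch classes).
final class SketchMapping {
    private final Map<String, String> fieldTypes;

    SketchMapping(Map<String, String> fieldTypes) {
        this.fieldTypes = fieldTypes;
    }

    // Returns this very instance when the field already has the requested type,
    // so a caller can compare with == and skip re-serializing/recompressing.
    SketchMapping updateFieldType(String field, String type) {
        if (type.equals(fieldTypes.get(field))) {
            return this; // no change
        }
        Map<String, String> copy = new HashMap<>(fieldTypes);
        copy.put(field, type);
        return new SketchMapping(copy);
    }
}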
Yannick Welsch 2016-12-15 09:20:28 +01:00 committed by GitHub
parent cdd5fbe3a1
commit b9600c7891
5 changed files with 64 additions and 20 deletions

DocumentMapper.java

@@ -327,6 +327,11 @@ public class DocumentMapper implements ToXContent {
      */
     public DocumentMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
         Mapping updated = this.mapping.updateFieldType(fullNameToFieldType);
+        if (updated == this.mapping) {
+            // no change
+            return this;
+        }
+        assert updated == updated.updateFieldType(fullNameToFieldType) : "updateFieldType operation is not idempotent";
         return new DocumentMapper(mapperService, updated);
     }
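Because updateFieldType now short-circuits, callers can rely on reference equality to tell whether anything needs to be rebuilt. Below is a hedged illustration of that call-site pattern; it is simplified, and the real call sites live in MapperService and are not part of this diff.

import java.util.Map;

// Illustrative only: shows how a caller can skip work when the mapper is unchanged.
final class UpdateCallerSketch {
    static DocumentMapper applyUpdate(DocumentMapper existing,
                                      Map<String, MappedFieldType> fullNameToFieldType) {
        DocumentMapper updated = existing.updateFieldType(fullNameToFieldType);
        if (updated == existing) {
            return existing; // unchanged: the compressed mapping source is reused as-is
        }
        return updated;      // only genuinely changed mappers are rebuilt and re-registered
    }
}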

FieldTypeLookup.java

@@ -93,7 +93,7 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
             // is the update even legal?
             checkCompatibility(type, fieldMapper, updateAllTypes);
-            if (fieldType != fullNameFieldType) {
+            if (fieldType.equals(fullNameFieldType) == false) {
                 fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
             }
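This swaps a reference comparison for a value comparison: when a second mapping type defines a field with exactly the same settings, the resulting MappedFieldType is equal to, but not the same object as, the one already in the lookup, so the old != check rewrote the map and cascaded into document-mapper updates for what was effectively a no-op. A tiny hypothetical illustration of the distinction (not Elasticsearch code):

// Hypothetical sketch: two types defining the same field yield field types that
// are equal by value but are distinct objects.
public class EqualsVsReference {
    record SketchFieldType(String name, boolean norms) { }

    public static void main(String[] args) {
        SketchFieldType fromType1 = new SketchFieldType("field", true);
        SketchFieldType fromType2 = new SketchFieldType("field", true);
        System.out.println(fromType1 != fromType2);      // true: the old check reported a "change"
        System.out.println(fromType1.equals(fromType2)); // true: the new check sees no real change
    }
}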

Mapping.java

@@ -104,12 +104,22 @@ public final class Mapping implements ToXContent {
      * Recursively update sub field types.
      */
     public Mapping updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
-        final MetadataFieldMapper[] updatedMeta = Arrays.copyOf(metadataMappers, metadataMappers.length);
-        for (int i = 0; i < updatedMeta.length; ++i) {
-            updatedMeta[i] = (MetadataFieldMapper) updatedMeta[i].updateFieldType(fullNameToFieldType);
+        MetadataFieldMapper[] updatedMeta = null;
+        for (int i = 0; i < metadataMappers.length; ++i) {
+            MetadataFieldMapper currentFieldMapper = metadataMappers[i];
+            MetadataFieldMapper updatedFieldMapper = (MetadataFieldMapper) currentFieldMapper.updateFieldType(fullNameToFieldType);
+            if (updatedFieldMapper != currentFieldMapper) {
+                if (updatedMeta == null) {
+                    updatedMeta = Arrays.copyOf(metadataMappers, metadataMappers.length);
+                }
+                updatedMeta[i] = updatedFieldMapper;
+            }
         }
         RootObjectMapper updatedRoot = root.updateFieldType(fullNameToFieldType);
-        return new Mapping(indexCreated, updatedRoot, updatedMeta, meta);
+        if (updatedMeta == null && updatedRoot == root) {
+            return this;
+        }
+        return new Mapping(indexCreated, updatedRoot, updatedMeta == null ? metadataMappers : updatedMeta, meta);
     }

     @Override
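The rewritten loop is a lazy copy-on-write pass over the metadata mapper array: the copy is allocated only once the first element actually changes, and a null copy doubles as the "nothing changed" signal. The same pattern in isolation, as a generic sketch under assumed names (not Elasticsearch code):

import java.util.Arrays;
import java.util.function.UnaryOperator;

final class CopyOnWriteUpdateSketch {
    // Applies update to every element, allocating a copy only when the first
    // element actually changes; returns the original array (same reference)
    // when nothing changed, so callers can detect a no-op with ==.
    static <T> T[] updateAll(T[] values, UnaryOperator<T> update) {
        T[] updated = null;
        for (int i = 0; i < values.length; i++) {
            T current = values[i];
            T next = update.apply(current);
            if (next != current) {
                if (updated == null) {
                    updated = Arrays.copyOf(values, values.length);
                }
                updated[i] = next;
            }
        }
        return updated == null ? values : updated;
    }
}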

FieldTypeLookupTests.java

@@ -72,11 +72,12 @@ public class FieldTypeLookupTests extends ESTestCase {
         MockFieldMapper f = new MockFieldMapper("foo");
         MockFieldMapper f2 = new MockFieldMapper("foo");
         FieldTypeLookup lookup = new FieldTypeLookup();
-        lookup = lookup.copyAndAddAll("type1", newList(f), randomBoolean());
-        FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), randomBoolean());
+        lookup = lookup.copyAndAddAll("type1", newList(f), true);
+        FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), true);

-        assertSame(f2.fieldType(), lookup2.get("foo"));
         assertEquals(1, size(lookup2.iterator()));
+        assertSame(f.fieldType(), lookup2.get("foo"));
+        assertEquals(f2.fieldType(), lookup2.get("foo"));
     }

     public void testAddExistingIndexName() {

MapperServiceTests.java

@@ -19,16 +19,6 @@
 package org.elasticsearch.index.mapper;

-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.function.Function;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
@@ -39,8 +29,17 @@ import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
 import org.elasticsearch.test.ESSingleNodeTestCase;

+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.Matchers.hasToString;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.startsWith;
@@ -169,7 +168,6 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         assertThat(mapperService.unmappedFieldType("string"), instanceOf(KeywordFieldType.class));
     }

     public void testMergeWithMap() throws Throwable {
         IndexService indexService1 = createIndex("index1");
         MapperService mapperService = indexService1.mapperService();
@@ -187,4 +185,34 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
                 () -> mapperService.merge(mappings, false));
         assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: "));
     }
+
+    public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IOException {
+        IndexService indexService = createIndex("test");
+
+        CompressedXContent simpleMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject()
+            .startObject("properties")
+                .startObject("field")
+                    .field("type", "text")
+                .endObject()
+            .endObject().endObject().bytes());
+
+        indexService.mapperService().merge("type1", simpleMapping, MergeReason.MAPPING_UPDATE, true);
+        DocumentMapper documentMapper = indexService.mapperService().documentMapper("type1");
+
+        indexService.mapperService().merge("type2", simpleMapping, MergeReason.MAPPING_UPDATE, true);
+        assertSame(indexService.mapperService().documentMapper("type1"), documentMapper);
+
+        CompressedXContent normsDisabledMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject()
+            .startObject("properties")
+                .startObject("field")
+                    .field("type", "text")
+                    .startObject("norms")
+                        .field("enabled", false)
+                    .endObject()
+                .endObject()
+            .endObject().endObject().bytes());
+
+        indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE, true);
+        assertNotSame(indexService.mapperService().documentMapper("type1"), documentMapper);
+    }
 }