Atomic mapping updates across types (#22220)

This commit makes mapping updates atomic when multiple types in an index are updated: mappings for an index are now applied in a single atomic operation, which also allows some of the cross-type updates and checks to be optimized.
Yannick Welsch 2016-12-19 14:39:50 +01:00 committed by GitHub
parent 1cabf66bd5
commit 63af03a104
17 changed files with 256 additions and 224 deletions
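
Before the per-file diffs, a minimal before/after sketch of the call-site shape this commit consolidates. It uses only calls that appear in the diff below, with the surrounding variables (indexService, indexMetaData) assumed to be in scope:

    // Before: each type was merged in its own call, so a failure on a later
    // type could leave earlier types already applied.
    for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
        MappingMetaData mappingMetaData = cursor.value;
        indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(),
            MapperService.MergeReason.MAPPING_RECOVERY, false);
    }

    // After: one synchronized merge covering every type in the index metadata;
    // a validation failure leaves the existing mappings untouched.
    indexService.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);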


@@ -67,6 +67,7 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.indices.IndexCreationException;
 import org.elasticsearch.indices.IndicesService;
@@ -356,10 +357,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     // now add the mappings
                     MapperService mapperService = indexService.mapperService();
                     try {
-                        mapperService.merge(mappings, request.updateAllTypes());
-                    } catch (MapperParsingException mpe) {
+                        mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
+                    } catch (Exception e) {
                         removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
-                        throw mpe;
+                        throw e;
                     }
                     // the context is only used for validation so it's fine to pass fake values for the shard id and the current


@@ -141,15 +141,11 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                     // temporarily create the index and add mappings so we can parse the filter
                     try {
                         indexService = indicesService.createIndex(index, emptyList(), shardId -> {});
+                        indicesToClose.add(index.getIndex());
                     } catch (IOException e) {
                         throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
                     }
-                    for (ObjectCursor<MappingMetaData> cursor : index.getMappings().values()) {
-                        MappingMetaData mappingMetaData = cursor.value;
-                        indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(),
-                            MapperService.MergeReason.MAPPING_RECOVERY, false);
-                    }
-                    indicesToClose.add(index.getIndex());
+                    indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false);
                 }
                 indices.put(action.getIndex(), indexService);
             }


@@ -39,6 +39,7 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.indices.IndexTemplateMissingException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -222,7 +223,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                 mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(entry.getValue()));
             }
-            dummyIndexService.mapperService().merge(mappingsForValidation, false);
+            dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false);
         } finally {
             if (createdIndex != null) {


@@ -147,10 +147,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
             };
             try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap)) {
                 MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, similarityService, mapperRegistry, () -> null);
-                for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
-                    MappingMetaData mappingMetaData = cursor.value;
-                    mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
-                }
+                mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
             }
         } catch (Exception ex) {
             // Wrap the inner exception so we have the index name in the exception message


@@ -43,6 +43,7 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidTypeNameException;
@@ -146,10 +147,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 // we need to create the index here, and add the current mapping to it, so we can merge
                 indexService = indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {});
                 removeIndex = true;
-                for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
-                    // don't apply the default mapping, it has been applied when the mapping was created
-                    indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
-                }
+                indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
             }
 
             IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
@@ -226,10 +224,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
                     indexMapperServices.put(index, mapperService);
                     // add mappings for all types, we need them for cross-type validation
-                    for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                        mapperService.merge(mapping.value.type(), mapping.value.source(),
-                            MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
-                    }
+                    mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
                 }
             }
             currentState = applyRequest(currentState, request, indexMapperServices);
@@ -313,7 +308,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 if (existingMapper != null) {
                     existingSource = existingMapper.mappingSource();
                 }
-                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
+                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
                 CompressedXContent updatedSource = mergedMapper.mappingSource();
                 if (existingSource != null) {


@@ -28,6 +28,7 @@ import org.elasticsearch.ElasticsearchGenerationException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
@@ -51,6 +52,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -61,7 +63,6 @@ import java.util.stream.Collectors;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static java.util.Collections.unmodifiableMap;
-import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
 
 public class MapperService extends AbstractIndexComponent implements Closeable {
@@ -191,153 +192,235 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         }
     }
 
+    /**
+     * Update mapping by only merging the metadata that is different between received and stored entries
+     */
     public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
         assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex();
         // go over and add the relevant mappings (or update them)
+        final Set<String> existingMappers = new HashSet<>(mappers.keySet());
+        final Map<String, DocumentMapper> updatedEntries;
+        try {
+            // only update entries if needed
+            updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true);
+        } catch (Exception e) {
+            logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
+            throw e;
+        }
         boolean requireRefresh = false;
-        for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
-            MappingMetaData mappingMd = cursor.value;
-            String mappingType = mappingMd.type();
-            CompressedXContent mappingSource = mappingMd.source();
+        for (DocumentMapper documentMapper : updatedEntries.values()) {
+            String mappingType = documentMapper.type();
+            CompressedXContent incomingMappingSource = indexMetaData.mapping(mappingType).source();
+
+            String op = existingMappers.contains(mappingType) ? "updated" : "added";
+            if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {
+                logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
+            } else if (logger.isTraceEnabled()) {
+                logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
+            } else {
+                logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op, mappingType);
+            }
+
             // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
             // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
             // merge version of it, which it does when refreshing the mappings), and warn log it.
-            try {
-                DocumentMapper existingMapper = documentMapper(mappingType);
-
-                if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) {
-                    String op = existingMapper == null ? "adding" : "updating";
-                    if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
-                        logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string());
-                    } else if (logger.isTraceEnabled()) {
-                        logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, mappingSource.string());
-                    } else {
-                        logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op,
-                            mappingType);
-                    }
-                    merge(mappingType, mappingSource, MergeReason.MAPPING_RECOVERY, true);
-                    if (!documentMapper(mappingType).mappingSource().equals(mappingSource)) {
-                        logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(),
-                            mappingType, mappingSource, documentMapper(mappingType).mappingSource());
-                        requireRefresh = true;
-                    }
-                }
-            } catch (Exception e) {
-                logger.warn(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("[{}] failed to add mapping [{}], source [{}]", index(), mappingType, mappingSource),
-                    e);
-                throw e;
+            if (documentMapper(mappingType).mappingSource().equals(incomingMappingSource) == false) {
+                logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(), mappingType,
+                    incomingMappingSource, documentMapper(mappingType).mappingSource());
+                requireRefresh = true;
             }
         }
         return requireRefresh;
     }
 
-    //TODO: make this atomic
-    public void merge(Map<String, Map<String, Object>> mappings, boolean updateAllTypes) throws MapperParsingException {
-        // first, add the default mapping
-        if (mappings.containsKey(DEFAULT_MAPPING)) {
-            try {
-                this.merge(DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(DEFAULT_MAPPING)).string()), MergeReason.MAPPING_UPDATE, updateAllTypes);
-            } catch (Exception e) {
-                throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, DEFAULT_MAPPING, e.getMessage());
-            }
-        }
+    public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason, boolean updateAllTypes) {
+        Map<String, CompressedXContent> mappingSourcesCompressed = new LinkedHashMap<>(mappings.size());
         for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
-            if (entry.getKey().equals(DEFAULT_MAPPING)) {
-                continue;
-            }
             try {
-                // apply the default here, its the first time we parse it
-                this.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), MergeReason.MAPPING_UPDATE, updateAllTypes);
+                mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()));
             } catch (Exception e) {
                 throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
             }
         }
+
+        internalMerge(mappingSourcesCompressed, reason, updateAllTypes);
+    }
+
+    public void merge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes) {
+        internalMerge(indexMetaData, reason, updateAllTypes, false);
     }
 
     public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
-        if (DEFAULT_MAPPING.equals(type)) {
-            // verify we can parse it
-            // NOTE: never apply the default here
-            DocumentMapper mapper = documentParser.parse(type, mappingSource);
-            // still add it as a document mapper so we have it registered and, for example, persisted back into
-            // the cluster meta data if needed, or checked for existence
-            synchronized (this) {
-                mappers = newMapBuilder(mappers).put(type, mapper).map();
-            }
-            try {
-                defaultMappingSource = mappingSource.string();
-            } catch (IOException e) {
-                throw new ElasticsearchGenerationException("failed to un-compress", e);
-            }
-            return mapper;
-        } else {
-            synchronized (this) {
-                final boolean applyDefault =
-                    // the default was already applied if we are recovering
-                    reason != MergeReason.MAPPING_RECOVERY
-                    // only apply the default mapping if we don't have the type yet
-                    && mappers.containsKey(type) == false;
-                DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
-                return merge(mergeWith, reason, updateAllTypes);
-            }
-        }
+        return internalMerge(Collections.singletonMap(type, mappingSource), reason, updateAllTypes).get(type);
+    }
+
+    private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes,
+                                                                   boolean onlyUpdateIfNeeded) {
+        Map<String, CompressedXContent> map = new LinkedHashMap<>();
+        for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
+            MappingMetaData mappingMetaData = cursor.value;
+            if (onlyUpdateIfNeeded) {
+                DocumentMapper existingMapper = documentMapper(mappingMetaData.type());
+                if (existingMapper == null || mappingMetaData.source().equals(existingMapper.mappingSource()) == false) {
+                    map.put(mappingMetaData.type(), mappingMetaData.source());
+                }
+            } else {
+                map.put(mappingMetaData.type(), mappingMetaData.source());
+            }
+        }
+        return internalMerge(map, reason, updateAllTypes);
+    }
+
+    private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason, boolean updateAllTypes) {
+        DocumentMapper defaultMapper = null;
+        String defaultMappingSource = null;
+
+        if (mappings.containsKey(DEFAULT_MAPPING)) {
+            // verify we can parse it
+            // NOTE: never apply the default here
+            try {
+                defaultMapper = documentParser.parse(DEFAULT_MAPPING, mappings.get(DEFAULT_MAPPING));
+            } catch (Exception e) {
+                throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, DEFAULT_MAPPING, e.getMessage());
+            }
+            try {
+                defaultMappingSource = mappings.get(DEFAULT_MAPPING).string();
+            } catch (IOException e) {
+                throw new ElasticsearchGenerationException("failed to un-compress", e);
+            }
+        }
+
+        final String defaultMappingSourceOrLastStored;
+        if (defaultMappingSource != null) {
+            defaultMappingSourceOrLastStored = defaultMappingSource;
+        } else {
+            defaultMappingSourceOrLastStored = this.defaultMappingSource;
+        }
+
+        List<DocumentMapper> documentMappers = new ArrayList<>();
+        for (Map.Entry<String, CompressedXContent> entry : mappings.entrySet()) {
+            String type = entry.getKey();
+            if (type.equals(DEFAULT_MAPPING)) {
+                continue;
+            }
+
+            final boolean applyDefault =
+                // the default was already applied if we are recovering
+                reason != MergeReason.MAPPING_RECOVERY
+                // only apply the default mapping if we don't have the type yet
+                && mappers.containsKey(type) == false;
+
+            try {
+                DocumentMapper documentMapper = documentParser.parse(type, entry.getValue(), applyDefault ? defaultMappingSourceOrLastStored : null);
+                documentMappers.add(documentMapper);
+            } catch (Exception e) {
+                throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
+            }
+        }
+
+        return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason, updateAllTypes);
     }
 
-    private synchronized DocumentMapper merge(DocumentMapper mapper, MergeReason reason, boolean updateAllTypes) {
-        if (mapper.type().length() == 0) {
-            throw new InvalidTypeNameException("mapping type name is empty");
-        }
-        if (mapper.type().length() > 255) {
-            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
-        }
-        if (mapper.type().charAt(0) == '_') {
-            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
-        }
-        if (mapper.type().contains("#")) {
-            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
-        }
-        if (mapper.type().contains(",")) {
-            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
-        }
-        if (mapper.type().equals(mapper.parentFieldMapper().type())) {
-            throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
-        }
-        if (typeNameStartsWithIllegalDot(mapper)) {
-            throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
-        }
-
-        // 1. compute the merged DocumentMapper
-        DocumentMapper oldMapper = mappers.get(mapper.type());
-        DocumentMapper newMapper;
-        if (oldMapper != null) {
-            newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
-        } else {
-            newMapper = mapper;
-        }
-
-        // 2. check basic sanity of the new mapping
-        List<ObjectMapper> objectMappers = new ArrayList<>();
-        List<FieldMapper> fieldMappers = new ArrayList<>();
-        Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
-        MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
-        checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers);
-        checkObjectsCompatibility(objectMappers, updateAllTypes);
-
-        // 3. update lookup data-structures
-        // this will in particular make sure that the merged fields are compatible with other types
-        FieldTypeLookup fieldTypes = this.fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);
+    private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
+                                                                   List<DocumentMapper> documentMappers, MergeReason reason, boolean updateAllTypes) {
         boolean hasNested = this.hasNested;
-        Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
-        for (ObjectMapper objectMapper : objectMappers) {
-            fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
-            if (objectMapper.nested().isNested()) {
-                hasNested = true;
-            }
-        }
+        boolean allEnabled = this.allEnabled;
+        Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
+        FieldTypeLookup fieldTypes = this.fieldTypes;
+        Set<String> parentTypes = this.parentTypes;
+        Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
+
+        Map<String, DocumentMapper> results = new LinkedHashMap<>(documentMappers.size() + 1);
+
+        if (defaultMapper != null) {
+            assert defaultMapper.type().equals(DEFAULT_MAPPING);
+            mappers.put(DEFAULT_MAPPING, defaultMapper);
+            results.put(DEFAULT_MAPPING, defaultMapper);
+        }
+
+        for (DocumentMapper mapper : documentMappers) {
+            // check naming
+            if (mapper.type().length() == 0) {
+                throw new InvalidTypeNameException("mapping type name is empty");
+            }
+            if (mapper.type().length() > 255) {
+                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
+            }
+            if (mapper.type().charAt(0) == '_') {
+                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
+            }
+            if (mapper.type().contains("#")) {
+                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
+            }
+            if (mapper.type().contains(",")) {
+                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
+            }
+            if (mapper.type().equals(mapper.parentFieldMapper().type())) {
+                throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
+            }
+            if (typeNameStartsWithIllegalDot(mapper)) {
+                throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
+            }
+
+            // compute the merged DocumentMapper
+            DocumentMapper oldMapper = mappers.get(mapper.type());
+            DocumentMapper newMapper;
+            if (oldMapper != null) {
+                newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
+            } else {
+                newMapper = mapper;
+            }
+
+            // check basic sanity of the new mapping
+            List<ObjectMapper> objectMappers = new ArrayList<>();
+            List<FieldMapper> fieldMappers = new ArrayList<>();
+            Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
+            MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
+            checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers, fullPathObjectMappers, fieldTypes);
+            checkObjectsCompatibility(objectMappers, updateAllTypes, fullPathObjectMappers);
+
+            // update lookup data-structures
+            // this will in particular make sure that the merged fields are compatible with other types
+            fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);
+
+            for (ObjectMapper objectMapper : objectMappers) {
+                if (fullPathObjectMappers == this.fullPathObjectMappers) {
+                    fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
+                }
+                fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
+
+                if (objectMapper.nested().isNested()) {
+                    hasNested = true;
+                }
+            }
+
+            if (reason == MergeReason.MAPPING_UPDATE) {
+                // this check will only be performed on the master node when there is
+                // a call to the update mapping API. For all other cases like
+                // the master node restoring mappings from disk or data nodes
+                // deserializing cluster state that was sent by the master node,
+                // this check will be skipped.
+                checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
+            }
+
+            if (oldMapper == null && newMapper.parentFieldMapper().active()) {
+                if (parentTypes == this.parentTypes) {
+                    parentTypes = new HashSet<>(this.parentTypes);
+                }
+                parentTypes.add(mapper.parentFieldMapper().type());
+            }
+
+            // this is only correct because types cannot be removed and we do not
+            // allow to disable an existing _all field
+            allEnabled |= mapper.allFieldMapper().enabled();
+
+            results.put(newMapper.type(), newMapper);
+            mappers.put(newMapper.type(), newMapper);
+        }
-        fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
 
         if (reason == MergeReason.MAPPING_UPDATE) {
             // this check will only be performed on the master node when there is
@@ -346,45 +429,46 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             // deserializing cluster state that was sent by the master node,
             // this check will be skipped.
             checkNestedFieldsLimit(fullPathObjectMappers);
-            checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
             checkDepthLimit(fullPathObjectMappers.keySet());
         }
 
-        Set<String> parentTypes = this.parentTypes;
-        if (oldMapper == null && newMapper.parentFieldMapper().active()) {
-            parentTypes = new HashSet<>(parentTypes.size() + 1);
-            parentTypes.addAll(this.parentTypes);
-            parentTypes.add(mapper.parentFieldMapper().type());
-            parentTypes = Collections.unmodifiableSet(parentTypes);
-        }
-
-        Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
-        mappers.put(newMapper.type(), newMapper);
         for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
             if (entry.getKey().equals(DEFAULT_MAPPING)) {
                 continue;
             }
-            DocumentMapper m = entry.getValue();
+            DocumentMapper documentMapper = entry.getValue();
             // apply changes to the field types back
-            m = m.updateFieldType(fieldTypes.fullNameToFieldType);
-            entry.setValue(m);
+            DocumentMapper updatedDocumentMapper = documentMapper.updateFieldType(fieldTypes.fullNameToFieldType);
+            if (updatedDocumentMapper != documentMapper) {
+                // update both mappers and result
+                entry.setValue(updatedDocumentMapper);
+                if (results.containsKey(updatedDocumentMapper.type())) {
+                    results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
+                }
+            }
         }
-        mappers = Collections.unmodifiableMap(mappers);
 
-        // 4. commit the change
+        // make structures immutable
+        mappers = Collections.unmodifiableMap(mappers);
+        results = Collections.unmodifiableMap(results);
+        parentTypes = Collections.unmodifiableSet(parentTypes);
+        fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
+
+        // commit the change
+        if (defaultMappingSource != null) {
+            this.defaultMappingSource = defaultMappingSource;
+        }
         this.mappers = mappers;
        this.fieldTypes = fieldTypes;
         this.hasNested = hasNested;
         this.fullPathObjectMappers = fullPathObjectMappers;
         this.parentTypes = parentTypes;
-        // this is only correct because types cannot be removed and we do not
-        // allow to disable an existing _all field
-        this.allEnabled |= mapper.allFieldMapper().enabled();
+        this.allEnabled = allEnabled;
 
-        assert assertSerialization(newMapper);
         assert assertMappersShareSameFieldType();
+        assert results.values().stream().allMatch(this::assertSerialization);
 
-        return newMapper;
+        return results;
     }
 
     private boolean assertMappersShareSameFieldType() {
@@ -421,8 +505,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return true;
     }
 
-    private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
-        assert Thread.holdsLock(this);
-
+    private static void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers,
+                                             Map<String, ObjectMapper> fullPathObjectMappers, FieldTypeLookup fieldTypes) {
         // first check within mapping
         final Set<String> objectFullNames = new HashSet<>();
@@ -459,9 +543,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         }
     }
 
-    private void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes) {
-        assert Thread.holdsLock(this);
-
+    private static void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes,
+                                                  Map<String, ObjectMapper> fullPathObjectMappers) {
         for (ObjectMapper newObjectMapper : objectMappers) {
             ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
             if (existingObjectMapper != null) {
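
One design note on the MapperService change above: internalMerge mutates only local copies (fullPathObjectMappers and parentTypes are copied lazily, on first write) and assigns the instance fields after all types have passed validation, so a failure part-way through publishes nothing. A stripped-down, hypothetical sketch of that copy-then-publish idiom; the class and method names here are illustrative, not the actual Elasticsearch code:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical distillation of the pattern used by internalMerge above.
    class AtomicRegistry<T> {
        private volatile Map<String, T> entries = Collections.emptyMap();

        synchronized void mergeAll(Map<String, T> incoming) {
            Map<String, T> copy = new HashMap<>(entries); // mutate a private copy only
            for (Map.Entry<String, T> e : incoming.entrySet()) {
                validate(e.getKey());                     // a throw here leaves 'entries' untouched
                copy.put(e.getKey(), e.getValue());
            }
            entries = Collections.unmodifiableMap(copy);  // publish all-or-nothing
        }

        private void validate(String type) {
            if (type.isEmpty()) {
                throw new IllegalArgumentException("mapping type name is empty");
            }
        }
    }

Because the writer holds the lock while readers access the volatile map without it, readers always observe either the previous state or the fully merged one, never an intermediate state.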


@@ -26,8 +26,7 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.NoLockFactory;
-import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.store.Store;
@@ -123,8 +122,8 @@ final class LocalShardSnapshot implements Closeable {
         }
     }
 
-    ImmutableOpenMap<String, MappingMetaData> getMappings() {
-        return shard.indexSettings.getIndexMetaData().getMappings();
+    IndexMetaData getIndexMetaData() {
+        return shard.indexSettings.getIndexMetaData();
     }
 
     @Override


@@ -104,12 +104,11 @@ final class StoreRecovery {
             if (indices.size() > 1) {
                 throw new IllegalArgumentException("can't add shards from more than one index");
             }
-            for (ObjectObjectCursor<String, MappingMetaData> mapping : shards.get(0).getMappings()) {
+            IndexMetaData indexMetaData = shards.get(0).getIndexMetaData();
+            for (ObjectObjectCursor<String, MappingMetaData> mapping : indexMetaData.getMappings()) {
                 mappingUpdateConsumer.accept(mapping.key, mapping.value);
             }
-            for (ObjectObjectCursor<String, MappingMetaData> mapping : shards.get(0).getMappings()) {
-                indexShard.mapperService().merge(mapping.key,mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
-            }
+            indexShard.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
             return executeRecovery(indexShard, () -> {
                 logger.debug("starting recovery from local shards {}", shards);
                 try {


@@ -485,11 +485,7 @@ public class IndicesService extends AbstractLifecycleComponent
             final IndexService service =
                 createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList(), s -> {});
             closeables.add(() -> service.close("metadata verification", false));
-            for (ObjectCursor<MappingMetaData> typeMapping : metaData.getMappings().values()) {
-                // don't apply the default mapping, it has been applied when the mapping was created
-                service.mapperService().merge(typeMapping.value.type(), typeMapping.value.source(),
-                    MapperService.MergeReason.MAPPING_RECOVERY, true);
-            }
+            service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
             if (metaData.equals(metaDataUpdate) == false) {
                 service.updateMetaData(metaDataUpdate);
             }


@@ -39,7 +39,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexNotFoundException;
-import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.query.RangeQueryBuilder;
 import org.elasticsearch.index.query.TermsQueryBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -277,15 +276,8 @@ public class CreateIndexIT extends ESIntegTestCase {
                 .startObject("text")
                 .field("type", "text")
                 .endObject().endObject().endObject());
-        try {
-            b.get();
-        } catch (MapperParsingException e) {
-            StringBuilder messages = new StringBuilder();
-            for (Exception rootCause: e.guessRootCauses()) {
-                messages.append(rootCause.getMessage());
-            }
-            assertThat(messages.toString(), containsString("mapper [text] is used by multiple types"));
-        }
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get());
+        assertThat(e.getMessage(), containsString("mapper [text] is used by multiple types"));
     }
 
     public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {
public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {


@@ -58,6 +58,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.nullValue;
@@ -491,7 +492,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
         assertEquals(ex.getMessage(), "Failed to verify index " + metaData.getIndex());
         assertNotNull(ex.getCause());
         assertEquals(MapperParsingException.class, ex.getCause().getClass());
-        assertEquals(ex.getCause().getMessage(), "analyzer [test] not found for field [field1]");
+        assertThat(ex.getCause().getMessage(), containsString("analyzer [test] not found for field [field1]"));
     }
 
     public void testArchiveBrokenClusterSettings() throws Exception {
public void testArchiveBrokenClusterSettings() throws Exception { public void testArchiveBrokenClusterSettings() throws Exception {


@@ -22,27 +22,17 @@ import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.geo.builders.ShapeBuilder;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
-import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
-import org.elasticsearch.test.VersionUtils;
 
 import java.io.IOException;
 import java.util.Collection;
 
-import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;


@@ -49,7 +49,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         String index = "test-index";
         String type = ".test-type";
         String field = "field";
-        MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
             client().admin().indices().prepareCreate(index)
                     .addMapping(type, field, "type=text")
                     .execute().actionGet();
@@ -62,7+62,7 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         String field = "field";
         String type = new String(new char[256]).replace("\0", "a");
-        MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
+        MapperException e = expectThrows(MapperException.class, () -> {
             client().admin().indices().prepareCreate(index)
                     .addMapping(type, field, "type=text")
                     .execute().actionGet();
@@ -175,14 +175,14 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         mappings.put(MapperService.DEFAULT_MAPPING, MapperService.parseMapping("{}"));
         MapperException e = expectThrows(MapperParsingException.class,
-            () -> mapperService.merge(mappings, false));
+            () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false));
         assertThat(e.getMessage(), startsWith("Failed to parse mapping [" + MapperService.DEFAULT_MAPPING + "]: "));
 
         mappings.clear();
         mappings.put("type1", MapperService.parseMapping("{}"));
 
         e = expectThrows( MapperParsingException.class,
-            () -> mapperService.merge(mappings, false));
+            () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false));
         assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: "));
     }


@@ -19,17 +19,11 @@
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -37,9 +31,7 @@ import org.elasticsearch.test.InternalSettingsPlugin;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.LinkedHashMap;
 
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.equalTo;


@@ -111,14 +111,9 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
     }
 
     public void testSelfReferentialIsForbidden() {
-        try {
-            prepareCreate("test").addMapping("type", "_parent", "type=type").get();
-            fail("self referential should be forbidden");
-        } catch (Exception e) {
-            Throwable cause = e.getCause();
-            assertThat(cause, instanceOf(IllegalArgumentException.class));
-            assertThat(cause.getMessage(), equalTo("The [_parent.type] option can't point to the same type"));
-        }
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+            prepareCreate("test").addMapping("type", "_parent", "type=type").get());
+        assertThat(e.getMessage(), equalTo("The [_parent.type] option can't point to the same type"));
     }
 
     public void testMultiLevelChild() throws Exception {


@@ -332,12 +332,9 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
         String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
                 .startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject()
                 .endObject().endObject().string();
-        try {
-            mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
-            fail("MapperParsingException expected");
-        } catch (MapperParsingException e) {
-            assertThat(e.getMessage(), equalTo("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
-        }
+        MapperParsingException e = expectThrows(MapperParsingException.class, () ->
+            mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true));
+        assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
     }
 
     // multiple percolator fields are allowed in the mapping, but only one field can be used at index time.


@@ -263,9 +263,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
         try {
             IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
             MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), indexSettings.getSettings());
-            for (ObjectObjectCursor<String, MappingMetaData> typeMapping : indexMetaData.getMappings()) {
-                mapperService.merge(typeMapping.key, typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
-            }
+            mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
             SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
             final IndexEventListener indexEventListener = new IndexEventListener() {
             };
}; };