process internal refresh/update mapping events per index

This allows us to create the index (if needed) only for the exact duration it is needed, rather than keeping it alive across the processing of all the events.
This commit is contained in:
Shay Banon 2013-07-24 21:34:33 +02:00
parent 426c2867d9
commit 993fad4c33
1 changed file with 131 additions and 107 deletions

View File

@ -20,13 +20,13 @@
package org.elasticsearch.cluster.metadata; package org.elasticsearch.cluster.metadata;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import org.elasticsearch.action.support.master.MasterNodeOperationRequest; import org.elasticsearch.action.support.master.MasterNodeOperationRequest;
import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.action.index.NodeMappingCreatedAction; import org.elasticsearch.cluster.action.index.NodeMappingCreatedAction;
import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.compress.CompressedString;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
@ -106,130 +106,154 @@ public class MetaDataMappingService extends AbstractComponent {
* and generate a single cluster change event out of all of those. * and generate a single cluster change event out of all of those.
*/ */
ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exception { ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exception {
List<Object> tasks = new ArrayList<Object>(); List<Object> allTasks = new ArrayList<Object>();
refreshOrUpdateQueue.drainTo(tasks); refreshOrUpdateQueue.drainTo(allTasks);
if (tasks.isEmpty()) { if (allTasks.isEmpty()) {
return currentState; return currentState;
} }
Set<String> indicesToRemove = Sets.newHashSet(); // break down to tasks per index, so we can optimize the on demand index service creation
// keep track of what we already refreshed, no need to refresh it again... // to only happen for the duration of a single index processing of its respective events
Set<Tuple<String, String>> processedRefreshes = Sets.newHashSet(); Map<String, List<Object>> tasksPerIndex = Maps.newHashMap();
try { for (Object task : allTasks) {
boolean dirty = false; String index = null;
MetaData.Builder mdBuilder = newMetaDataBuilder().metaData(currentState.metaData()); if (task instanceof UpdateTask) {
for (Object task : tasks) { index = ((UpdateTask) task).index;
if (task instanceof RefreshTask) { } else if (task instanceof RefreshTask) {
RefreshTask refreshTask = (RefreshTask) task; index = ((RefreshTask) task).index;
String index = refreshTask.index; } else {
final IndexMetaData indexMetaData = mdBuilder.get(index); logger.warn("illegal state, got wrong mapping task type [{}]", task);
if (indexMetaData == null) { }
// index got delete on us, ignore... if (index != null) {
continue; List<Object> indexTasks = tasksPerIndex.get(index);
} if (indexTasks == null) {
IndexService indexService = indicesService.indexService(index); indexTasks = new ArrayList<Object>();
if (indexService == null) { tasksPerIndex.put(index, indexTasks);
// we need to create the index here, and add the current mapping to it, so we can merge }
indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), currentState.nodes().localNode().id()); indexTasks.add(task);
indicesToRemove.add(index); }
}
boolean dirty = false;
MetaData.Builder mdBuilder = newMetaDataBuilder().metaData(currentState.metaData());
for (Map.Entry<String, List<Object>> entry : tasksPerIndex.entrySet()) {
String index = entry.getKey();
List<Object> tasks = entry.getValue();
boolean removeIndex = false;
// keep track of what we already refreshed, no need to refresh it again...
Set<String> processedRefreshes = Sets.newHashSet();
try {
for (Object task : tasks) {
if (task instanceof RefreshTask) {
RefreshTask refreshTask = (RefreshTask) task;
final IndexMetaData indexMetaData = mdBuilder.get(index);
if (indexMetaData == null) {
// index got delete on us, ignore...
continue;
}
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), currentState.nodes().localNode().id());
removeIndex = true;
for (String type : refreshTask.types) {
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(type)) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source().string(), false);
}
}
}
IndexMetaData.Builder indexMetaDataBuilder = newIndexMetaDataBuilder(indexMetaData);
List<String> updatedTypes = Lists.newArrayList();
for (String type : refreshTask.types) { for (String type : refreshTask.types) {
if (processedRefreshes.contains(type)) {
continue;
}
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
if (!mapper.mappingSource().equals(indexMetaData.mappings().get(type).source())) {
updatedTypes.add(type);
indexMetaDataBuilder.putMapping(new MappingMetaData(mapper));
}
processedRefreshes.add(type);
}
if (updatedTypes.isEmpty()) {
continue;
}
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
mdBuilder.put(indexMetaDataBuilder);
dirty = true;
} else if (task instanceof UpdateTask) {
UpdateTask updateTask = (UpdateTask) task;
String type = updateTask.type;
CompressedString mappingSource = updateTask.mappingSource;
// first, check if it really needs to be updated
final IndexMetaData indexMetaData = mdBuilder.get(index);
if (indexMetaData == null) {
// index got delete on us, ignore...
continue;
}
if (indexMetaData.mappings().containsKey(type) && indexMetaData.mapping(type).source().equals(mappingSource)) {
continue;
}
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), currentState.nodes().localNode().id());
removeIndex = true;
// only add the current relevant mapping (if exists) // only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(type)) { if (indexMetaData.mappings().containsKey(type)) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source().string(), false); indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source().string(), false);
} }
} }
}
IndexMetaData.Builder indexMetaDataBuilder = newIndexMetaDataBuilder(indexMetaData); DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource.string(), false);
List<String> updatedTypes = Lists.newArrayList(); processedRefreshes.add(type);
for (String type : refreshTask.types) {
Tuple<String, String> processedRefresh = Tuple.tuple(index, type); // if we end up with the same mapping as the original once, ignore
if (processedRefreshes.contains(processedRefresh)) { if (indexMetaData.mappings().containsKey(type) && indexMetaData.mapping(type).source().equals(updatedMapper.mappingSource())) {
continue; continue;
} }
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
if (!mapper.mappingSource().equals(indexMetaData.mappings().get(type).source())) { // build the updated mapping source
updatedTypes.add(type); if (logger.isDebugEnabled()) {
indexMetaDataBuilder.putMapping(new MappingMetaData(mapper)); try {
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource().string());
} catch (Exception e) {
// ignore
}
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
} }
processedRefreshes.add(processedRefresh);
mdBuilder.put(newIndexMetaDataBuilder(indexMetaData).putMapping(new MappingMetaData(updatedMapper)));
dirty = true;
} else {
logger.warn("illegal state, got wrong mapping task type [{}]", task);
} }
if (updatedTypes.isEmpty()) {
continue;
}
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
mdBuilder.put(indexMetaDataBuilder);
dirty = true;
} else if (task instanceof UpdateTask) {
UpdateTask updateTask = (UpdateTask) task;
String index = updateTask.index;
String type = updateTask.type;
CompressedString mappingSource = updateTask.mappingSource;
// first, check if it really needs to be updated
final IndexMetaData indexMetaData = mdBuilder.get(index);
if (indexMetaData == null) {
// index got delete on us, ignore...
continue;
}
if (indexMetaData.mappings().containsKey(type) && indexMetaData.mapping(type).source().equals(mappingSource)) {
continue;
}
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), currentState.nodes().localNode().id());
indicesToRemove.add(index);
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(type)) {
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source().string(), false);
}
}
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource.string(), false);
processedRefreshes.add(Tuple.tuple(index, type));
// if we end up with the same mapping as the original once, ignore
if (indexMetaData.mappings().containsKey(type) && indexMetaData.mapping(type).source().equals(updatedMapper.mappingSource())) {
continue;
}
// build the updated mapping source
if (logger.isDebugEnabled()) {
try {
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource().string());
} catch (Exception e) {
// ignore
}
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
}
mdBuilder.put(newIndexMetaDataBuilder(indexMetaData).putMapping(new MappingMetaData(updatedMapper)));
dirty = true;
} else {
logger.warn("illegal state, got wrong mapping task type [{}]", task);
} }
} } finally {
if (!dirty) { if (removeIndex) {
return currentState; indicesService.removeIndex(index, "created for mapping processing");
} }
return newClusterStateBuilder().state(currentState).metaData(mdBuilder).build(); for (Object task : tasks) {
} finally { if (task instanceof UpdateTask) {
for (String index : indicesToRemove) { ((UpdateTask) task).listener.onResponse(new Response(true));
indicesService.removeIndex(index, "created for mapping processing"); }
}
for (Object task : tasks) {
if (task instanceof UpdateTask) {
((UpdateTask) task).listener.onResponse(new Response(true));
} }
} }
} }
if (!dirty) {
return currentState;
}
return newClusterStateBuilder().state(currentState).metaData(mdBuilder).build();
} }
/** /**