remove some field caching logic: it does not seem to do much perf-wise, and removing it simplifies the code again

kimchy 2011-04-19 17:07:32 +03:00
parent 56c9783cb3
commit 5cc943c7db
10 changed files with 7 additions and 112 deletions
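For context on what is being removed: IdFieldMapper, TypeFieldMapper and SourceFieldMapper each kept a per-thread pool of reusable Lucene Field instances, and processDocumentAfterIndex / processFieldAfterIndex existed only to push those fields back into the pool after a write. A minimal sketch of that pattern, assuming Lucene 3.x; the class name and the "_id" field name here are illustrative, not the actual mapper code:

import java.util.ArrayDeque;

import org.apache.lucene.document.Field;

// Sketch of the removed pattern: a per-thread pool of reusable Field instances.
class PooledFieldFactory {

    private final ThreadLocal<ArrayDeque<Field>> fieldCache = new ThreadLocal<ArrayDeque<Field>>() {
        @Override protected ArrayDeque<Field> initialValue() {
            return new ArrayDeque<Field>();
        }
    };

    // Borrow a pooled field (or create one) and point it at this document's value.
    Field borrow(String value) {
        Field field = fieldCache.get().poll();
        if (field == null) {
            field = new Field("_id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
        }
        field.setValue(value);
        return field;
    }

    // What processFieldAfterIndex did: clear the value and return the field to the pool.
    void release(Field field) {
        field.setValue("");
        fieldCache.get().add(field);
    }
}

After this commit the mappers simply allocate a fresh Field per document, which is what the new "return new Field(...)" lines in the diffs below do.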


@@ -151,8 +151,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
ops = new Engine.IndexingOperation[request.items().length];
}
ops[i] = op;
} else {
op.docMapper().processDocumentAfterIndex(op.doc());
}
// add the response
@@ -222,7 +220,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
try {
PercolatorExecutor.Response percolate = indexService.percolateService().percolate(new PercolatorExecutor.DocAndSourceQueryRequest(op.parsedDoc(), indexRequest.percolate()));
((IndexResponse) itemResponse.response()).matches(percolate.matches());
op.docMapper().processDocumentAfterIndex(op.doc());
} catch (Exception e) {
logger.warn("failed to percolate [{}]", e, itemRequest.request());
}
@@ -243,11 +240,9 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
indexShard.index(index);
index.docMapper().processDocumentAfterIndex(index.doc());
} else {
Engine.Create create = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
indexShard.create(create);
create.docMapper().processDocumentAfterIndex(create.doc());
}
} catch (Exception e) {
// ignore, we are on backup


@@ -212,18 +212,12 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
@Override protected void postPrimaryOperation(IndexRequest request, PrimaryResponse<IndexResponse> response) {
Engine.IndexingOperation op = (Engine.IndexingOperation) response.payload();
if (!Strings.hasLength(request.percolate())) {
try {
op.docMapper().processDocumentAfterIndex(op.doc());
} catch (Exception e) {
logger.warn("failed to cleanup doc after index [{}]", e, request);
}
return;
}
IndexService indexService = indicesService.indexServiceSafe(request.index());
try {
PercolatorExecutor.Response percolate = indexService.percolateService().percolate(new PercolatorExecutor.DocAndSourceQueryRequest(op.parsedDoc(), request.percolate()));
response.response().matches(percolate.matches());
op.docMapper().processDocumentAfterIndex(op.doc());
} catch (Exception e) {
logger.warn("failed to percolate [{}]", e, request);
}
@@ -239,13 +233,11 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
.version(request.version())
.origin(Engine.Operation.Origin.REPLICA);
indexShard.index(index);
index.docMapper().processDocumentAfterIndex(index.doc());
} else {
Engine.Create create = indexShard.prepareCreate(sourceToParse)
.version(request.version())
.origin(Engine.Operation.Origin.REPLICA);
indexShard.create(create);
create.docMapper().processDocumentAfterIndex(create.doc());
}
if (request.refresh()) {
try {

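The net effect on the write path above is that neither the primary nor the replica side calls back into the doc mapper after indexing. To illustrate why no post-index cleanup is needed once fields are plain per-document objects, here is a standalone sketch against plain Lucene 3.1, not the Elasticsearch write path itself; names and values are illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class NoCleanupAfterIndexExample {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir,
                new IndexWriterConfig(Version.LUCENE_31, new StandardAnalyzer(Version.LUCENE_31)));

        Document doc = new Document();
        // A fresh Field per document: nothing has to be reset or returned to a pool afterwards.
        doc.add(new Field("_id", false, "1", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO));
        writer.addDocument(doc);

        writer.close();
    }
}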

@@ -20,7 +20,6 @@
package org.elasticsearch.index.mapper;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.Filter;
import org.elasticsearch.common.Nullable;
@@ -134,8 +133,6 @@ public interface DocumentMapper {
*/
void addFieldMapperListener(FieldMapperListener fieldMapperListener, boolean includeExisting);
void processDocumentAfterIndex(Document doc);
/**
* A result of a merge.
*/


@@ -168,6 +168,4 @@ public interface FieldMapper<T> {
Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper);
FieldDataType fieldDataType();
void processFieldAfterIndex(Fieldable field);
}


@@ -422,7 +422,4 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T>, XContent
// nothing to do here, sub classes to override if needed
}
public void processFieldAfterIndex(Fieldable field) {
}
}


@@ -28,7 +28,6 @@ import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import java.io.IOException;
import java.util.ArrayDeque;
/**
* @author kimchy (shay.banon)
@@ -62,12 +61,6 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements org.el
}
}
private final ThreadLocal<ArrayDeque<Field>> fieldCache = new ThreadLocal<ArrayDeque<Field>>() {
@Override protected ArrayDeque<Field> initialValue() {
return new ArrayDeque<Field>();
}
};
protected IdFieldMapper() {
this(Defaults.NAME, Defaults.INDEX_NAME);
}
@@ -115,13 +108,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements org.el
if (index == Field.Index.NO && store == Field.Store.NO) {
return null;
}
ArrayDeque<Field> cache = fieldCache.get();
Field field = cache.poll();
if (field == null) {
field = new Field(names.indexName(), "", store, index);
}
field.setValue(context.id());
return field;
return new Field(names.indexName(), false, context.id(), store, index, termVector);
} else if (context.parsedIdState() == ParseContext.ParsedIdState.EXTERNAL) {
if (context.id() == null) {
throw new MapperParsingException("No id mapping with [" + names.name() + "] found in the content, and not explicitly set");
@@ -129,32 +116,16 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements org.el
if (index == Field.Index.NO && store == Field.Store.NO) {
return null;
}
ArrayDeque<Field> cache = fieldCache.get();
Field field = cache.poll();
if (field == null) {
field = new Field(names.indexName(), "", store, index);
}
field.setValue(context.id());
return field;
return new Field(names.indexName(), false, context.id(), store, index, termVector);
} else {
throw new MapperParsingException("Illegal parsed id state");
}
}
@Override public void processFieldAfterIndex(Fieldable field) {
Field field1 = (Field) field;
field1.setValue("");
fieldCache.get().add(field1);
}
@Override protected String contentType() {
return CONTENT_TYPE;
}
@Override public void close() {
fieldCache.remove();
}
@Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
// if all are defaults, no sense to write it at all
if (store == Defaults.STORE && index == Defaults.INDEX) {

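The replacement lines in IdFieldMapper above use Lucene's six-argument Field constructor, whose second (boolean) argument controls field-name interning; passing false skips the intern call, presumably because the mapper's index name is already a long-lived string. A minimal sketch of building an _id field that way, assuming Lucene 3.x (the id value is illustrative):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

public class IdFieldExample {
    public static void main(String[] args) {
        // Six-argument constructor: 'false' disables field-name interning.
        Field idField = new Field("_id", false, "my-doc-id",
                Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO);

        Document doc = new Document();
        doc.add(idField);
        System.out.println(doc.getFieldable("_id").stringValue());
    }
}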

@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.xcontent;
import org.apache.lucene.document.*;
import org.elasticsearch.ElasticSearchParseException;
import org.elasticsearch.common.Bytes;
import org.elasticsearch.common.compress.lzf.LZF;
import org.elasticsearch.common.compress.lzf.LZFDecoder;
import org.elasticsearch.common.compress.lzf.LZFEncoder;
@@ -31,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MergeMappingException;
import java.io.IOException;
import java.util.ArrayDeque;
/**
* @author kimchy (shay.banon)
@@ -82,12 +80,6 @@ public class SourceFieldMapper extends AbstractFieldMapper<byte[]> implements or
}
}
private ThreadLocal<ArrayDeque<Field>> fieldCache = new ThreadLocal<ArrayDeque<Field>>() {
@Override protected ArrayDeque<Field> initialValue() {
return new ArrayDeque<Field>();
}
};
private final boolean enabled;
private Boolean compress;
@@ -134,19 +126,7 @@ public class SourceFieldMapper extends AbstractFieldMapper<byte[]> implements or
context.source(data);
}
}
ArrayDeque<Field> cache = fieldCache.get();
Field field = cache.poll();
if (field == null) {
field = new Field(names().indexName(), Bytes.EMPTY_ARRAY);
}
field.setValue(data);
return field;
}
@Override public void processFieldAfterIndex(Fieldable field) {
Field field1 = (Field) field;
field1.setValue(Bytes.EMPTY_ARRAY);
fieldCache.get().add(field1);
return new Field(names().indexName(), data);
}
@Override public byte[] value(Document document) {
@@ -201,10 +181,6 @@ public class SourceFieldMapper extends AbstractFieldMapper<byte[]> implements or
}
}
@Override public void close() {
fieldCache.remove();
}
@Override protected String contentType() {
return CONTENT_TYPE;
}

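After the change above, SourceFieldMapper wraps the (possibly LZF-compressed) source bytes in a new binary stored field per document instead of recycling one from a pool. A minimal sketch of that kind of binary stored field, assuming the two-argument Field(String, byte[]) constructor used in the new code (Lucene 3.1-era API; the JSON value is illustrative):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

public class SourceFieldExample {
    public static void main(String[] args) throws Exception {
        byte[] source = "{\"field\":\"value\"}".getBytes("UTF-8");

        // Binary fields are always stored; the value here would be the raw
        // (or LZF-compressed, when compression is enabled) document source.
        Document doc = new Document();
        doc.add(new Field("_source", source));

        System.out.println(new String(doc.getBinaryValue("_source"), "UTF-8"));
    }
}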

@@ -28,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MergeMappingException;
import java.io.IOException;
import java.util.ArrayDeque;
/**
* @author kimchy (shay.banon)
@@ -62,12 +61,6 @@ public class TypeFieldMapper extends AbstractFieldMapper<String> implements org.
}
}
private final ThreadLocal<ArrayDeque<Field>> fieldCache = new ThreadLocal<ArrayDeque<Field>>() {
@Override protected ArrayDeque<Field> initialValue() {
return new ArrayDeque<Field>();
}
};
protected TypeFieldMapper() {
this(Defaults.NAME, Defaults.INDEX_NAME);
}
@@ -112,21 +105,7 @@ public class TypeFieldMapper extends AbstractFieldMapper<String> implements org.
if (index == Field.Index.NO && store == Field.Store.NO) {
return null;
}
ArrayDeque<Field> cache = fieldCache.get();
Field field = cache.poll();
if (field == null) {
field = new Field(names.indexName(), "", store, index);
}
field.setValue(context.type());
return field;
}
@Override public void processFieldAfterIndex(Fieldable field) {
fieldCache.get().add((Field) field);
}
@Override public void close() {
fieldCache.remove();
return new Field(names.indexName(), false, context.type(), store, index, termVector);
}
@Override protected String contentType() {


@@ -83,6 +83,9 @@ public class UidFieldMapper extends AbstractFieldMapper<Uid> implements org.elas
throw new MapperParsingException("No id found while parsing the content source");
}
context.uid(Uid.createUid(context.stringBuilder(), context.type(), context.id()));
// so, caching uid stream and field is fine
// since we don't do any mapping parsing without immediate indexing
// and, when percolating, we don't index the uid
UidField field = fieldCache.get();
field.setUid(context.uid());
return field; // version get updated by the engine

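UidFieldMapper is the one mapper that keeps its per-thread cache, for the reason stated in the comment added above: a parsed document is always indexed immediately on the same thread, and percolation never indexes the uid. A rough sketch of that single-slot, per-thread reuse, using a plain Lucene Field as a stand-in for the internal UidField class (names are illustrative):

import org.apache.lucene.document.Field;

class UidFieldCacheSketch {

    // One reusable field per thread. This is only safe because parse -> index
    // happens immediately on the same thread, so the value is never clobbered mid-use.
    private final ThreadLocal<Field> fieldCache = new ThreadLocal<Field>() {
        @Override protected Field initialValue() {
            return new Field("_uid", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
        }
    };

    Field uidField(String uid) {
        Field field = fieldCache.get();
        field.setValue(uid); // e.g. "type#id"
        return field;
    }
}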

@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.xcontent;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.Filter;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Preconditions;
@@ -518,18 +517,6 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
}
}
@Override public void processDocumentAfterIndex(Document doc) {
for (Fieldable field : doc.getFields()) {
FieldMappers fieldMappers = mappers().indexName(field.name());
if (fieldMappers != null) {
FieldMapper mapper = fieldMappers.mapper();
if (mapper != null) {
mapper.processFieldAfterIndex(field);
}
}
}
}
@Override public synchronized MergeResult merge(DocumentMapper mergeWith, MergeFlags mergeFlags) {
XContentDocumentMapper xContentMergeWith = (XContentDocumentMapper) mergeWith;
MergeContext mergeContext = new MergeContext(this, mergeFlags);