LUCENE-4055: don't modify FieldInfo during flush

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4055@1340181 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2012-05-18 17:38:07 +00:00
parent b0ec623d46
commit 12f88eec3f
14 changed files with 43 additions and 34 deletions
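The change is mechanical but broad: the per-field maps passed through the flush chain are now keyed by the immutable field name rather than by the mutable, shared FieldInfo object, and the remaining FieldInfo updates are guarded or deferred so flush no longer rewrites shared metadata. A minimal sketch of the keying pattern, using hypothetical stand-in types (FieldMeta, PerFieldConsumer) rather than Lucene's package-private classes:

    import java.util.HashMap;
    import java.util.Map;

    public class FlushKeyingSketch {

      // Hypothetical stand-in for FieldInfo: mutable per-field metadata.
      static class FieldMeta {
        final String name;
        boolean storePayloads;
        FieldMeta(String name) { this.name = name; }
      }

      // Hypothetical stand-in for a per-field consumer in the flush chain.
      static class PerFieldConsumer {
        final FieldMeta meta;
        PerFieldConsumer(FieldMeta meta) { this.meta = meta; }
      }

      public static void main(String[] args) {
        // Before: maps were keyed by the mutable metadata object itself.
        // After: maps are keyed by the immutable field name; consumers that
        // still need the metadata resolve it by name at flush time
        // (in the patch: f.getFieldInfo().name, fieldsToFlush.get(fi.name)).
        FieldMeta body = new FieldMeta("body");
        Map<String, PerFieldConsumer> childFields = new HashMap<String, PerFieldConsumer>();
        childFields.put(body.name, new PerFieldConsumer(body));

        PerFieldConsumer toWrite = childFields.get("body");
        System.out.println("flushing field: " + toWrite.meta.name);
      }
    }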

DocFieldConsumer.java

@@ -23,7 +23,7 @@ import java.util.Map;
abstract class DocFieldConsumer {
/** Called when DocumentsWriterPerThread decides to create a new
* segment */
- abstract void flush(Map<FieldInfo, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException;
+ abstract void flush(Map<String, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException;
/** Called when an aborting exception is hit */
abstract void abort();

DocFieldProcessor.java

@@ -71,10 +71,10 @@ final class DocFieldProcessor extends DocConsumer {
@Override
public void flush(SegmentWriteState state) throws IOException {
- Map<FieldInfo, DocFieldConsumerPerField> childFields = new HashMap<FieldInfo, DocFieldConsumerPerField>();
+ Map<String,DocFieldConsumerPerField> childFields = new HashMap<String,DocFieldConsumerPerField>();
Collection<DocFieldConsumerPerField> fields = fields();
for (DocFieldConsumerPerField f : fields) {
- childFields.put(f.getFieldInfo(), f);
+ childFields.put(f.getFieldInfo().name, f);
}
fieldsWriter.flush(state);

DocInverter.java

@@ -39,12 +39,12 @@ final class DocInverter extends DocFieldConsumer {
}
@Override
- void flush(Map<FieldInfo, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException {
+ void flush(Map<String, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException {
- Map<FieldInfo, InvertedDocConsumerPerField> childFieldsToFlush = new HashMap<FieldInfo, InvertedDocConsumerPerField>();
- Map<FieldInfo, InvertedDocEndConsumerPerField> endChildFieldsToFlush = new HashMap<FieldInfo, InvertedDocEndConsumerPerField>();
+ Map<String, InvertedDocConsumerPerField> childFieldsToFlush = new HashMap<String, InvertedDocConsumerPerField>();
+ Map<String, InvertedDocEndConsumerPerField> endChildFieldsToFlush = new HashMap<String, InvertedDocEndConsumerPerField>();
- for (Map.Entry<FieldInfo, DocFieldConsumerPerField> fieldToFlush : fieldsToFlush.entrySet()) {
+ for (Map.Entry<String, DocFieldConsumerPerField> fieldToFlush : fieldsToFlush.entrySet()) {
DocInverterPerField perField = (DocInverterPerField) fieldToFlush.getValue();
childFieldsToFlush.put(fieldToFlush.getKey(), perField.consumer);
endChildFieldsToFlush.put(fieldToFlush.getKey(), perField.endConsumer);

FieldInfo.java

@@ -79,6 +79,12 @@ public final class FieldInfo {
} else { // for non-indexed fields, leave defaults
this.storeTermVector = false;
this.storePayloads = false;
+ // nocommit these trip ... which is spooky... means
+ // the FI we are cloning was in a bad state...
+ //assert !storeTermVector;
+ //assert !storePayloads;
+ //assert !omitNorms;
+ //assert normsType == null;
this.omitNorms = false;
this.indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
this.normType = null;
@@ -155,7 +161,9 @@
}
void setStorePayloads() {
- storePayloads = true;
+ if (indexed && indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
+ storePayloads = true;
+ }
}
void setNormValueType(Type type) {
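The guard added to setStorePayloads() centralizes the index-options check inside the setter, so callers can record payloads unconditionally. A rough stand-alone illustration of the same idea; FieldMeta and its fields are hypothetical stand-ins, not Lucene's API:

    public class PayloadGuardSketch {

      // Hypothetical simplified metadata mirroring the guarded setter above.
      static class FieldMeta {
        boolean indexed;
        boolean indexesPositions;
        boolean storePayloads;

        // Only record payloads when the field is indexed with positions;
        // otherwise the call is silently a no-op.
        void setStorePayloads() {
          if (indexed && indexesPositions) {
            storePayloads = true;
          }
        }
      }

      public static void main(String[] args) {
        FieldMeta docsOnly = new FieldMeta();
        docsOnly.indexed = true;                       // indexed, but no positions
        docsOnly.setStorePayloads();
        System.out.println(docsOnly.storePayloads);    // false

        FieldMeta withPositions = new FieldMeta();
        withPositions.indexed = true;
        withPositions.indexesPositions = true;
        withPositions.setStorePayloads();
        System.out.println(withPositions.storePayloads);  // true
      }
    }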

FieldInfos.java

@@ -306,8 +306,12 @@ public class FieldInfos implements Iterable<FieldInfo> {
}
final FieldInfos finish() {
- // nocommit: bogus we don't clone each FI
- return new FieldInfos(byName.values().toArray(new FieldInfo[byName.size()]));
+ FieldInfo[] cloned = new FieldInfo[byName.size()];
+ int upto = 0;
+ for(FieldInfo fieldInfo : byName.values()) {
+ cloned[upto++] = fieldInfo.clone();
+ }
+ return new FieldInfos(cloned);
}
}
}
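With this change finish() hands out clones, so the FieldInfos snapshot given to a flushing segment is decoupled from the builder's live FieldInfo instances. A small sketch of the defensive-copy idea, again with hypothetical stand-in types:

    import java.util.ArrayList;
    import java.util.List;

    public class FinishCloneSketch {

      // Hypothetical mutable per-field metadata, standing in for FieldInfo.
      static class FieldMeta implements Cloneable {
        final String name;
        boolean storePayloads;
        FieldMeta(String name) { this.name = name; }
        @Override public FieldMeta clone() {
          FieldMeta copy = new FieldMeta(name);
          copy.storePayloads = storePayloads;
          return copy;
        }
      }

      // Hypothetical builder, standing in for the FieldInfos builder.
      static class Builder {
        private final List<FieldMeta> byName = new ArrayList<FieldMeta>();
        void add(FieldMeta meta) { byName.add(meta); }

        // Return clones so later updates to the live metadata cannot
        // leak into an already-built snapshot.
        List<FieldMeta> finish() {
          List<FieldMeta> cloned = new ArrayList<FieldMeta>(byName.size());
          for (FieldMeta meta : byName) {
            cloned.add(meta.clone());
          }
          return cloned;
        }
      }

      public static void main(String[] args) {
        Builder builder = new Builder();
        FieldMeta live = new FieldMeta("body");
        builder.add(live);
        List<FieldMeta> snapshot = builder.finish();
        live.storePayloads = true;                          // mutate after finish()
        System.out.println(snapshot.get(0).storePayloads);  // false: snapshot unaffected
      }
    }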

FreqProxTermsWriter.java

@@ -39,7 +39,7 @@ final class FreqProxTermsWriter extends TermsHashConsumer {
// Other writers would presumably share alot of this...
@Override
- public void flush(Map<FieldInfo, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
+ public void flush(Map<String,TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
// Gather all FieldData's that have postings, across all
// ThreadStates
@@ -80,15 +80,7 @@ final class FreqProxTermsWriter extends TermsHashConsumer {
final FieldInfo fieldInfo = allFields.get(fieldNumber).fieldInfo;
final FreqProxTermsWriterPerField fieldWriter = allFields.get(fieldNumber);
- // Aggregate the storePayload as seen by the same
- // field across multiple threads
- if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
- if (fieldWriter.hasPayloads) {
- fieldInfo.setStorePayloads();
- }
- }
// If this field has postings then add them to the
// segment
fieldWriter.flush(fieldInfo.name, consumer, state);

FreqProxTermsWriterPerField.java

@@ -68,7 +68,11 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
}
@Override
- void finish() {}
+ void finish() {
+ if (hasPayloads) {
+ fieldInfo.setStorePayloads();
+ }
+ }
boolean hasPayloads;
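Because setStorePayloads() is now guarded inside FieldInfo, the per-field writer can simply record its payload flag when the field finishes, replacing the aggregation block removed from FreqProxTermsWriter.flush above. Roughly, with the same hypothetical stand-ins:

    public class FinishTimePayloadsSketch {

      // Hypothetical per-field metadata with the guarded setter shown earlier.
      static class FieldMeta {
        boolean indexed = true;
        boolean indexesPositions = true;
        boolean storePayloads;
        void setStorePayloads() {
          if (indexed && indexesPositions) {
            storePayloads = true;
          }
        }
      }

      // Hypothetical per-field writer: the payload flag observed while
      // inverting documents is pushed to the metadata in finish().
      static class PerFieldWriter {
        final FieldMeta meta;
        boolean hasPayloads;
        PerFieldWriter(FieldMeta meta) { this.meta = meta; }
        void finish() {
          if (hasPayloads) {
            meta.setStorePayloads();
          }
        }
      }

      public static void main(String[] args) {
        FieldMeta meta = new FieldMeta();
        PerFieldWriter writer = new PerFieldWriter(meta);
        writer.hasPayloads = true;               // a payload was seen for this field
        writer.finish();
        System.out.println(meta.storePayloads);  // true
      }
    }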

InvertedDocConsumer.java

@@ -26,7 +26,7 @@ abstract class InvertedDocConsumer {
abstract void abort();
/** Flush a new segment */
- abstract void flush(Map<FieldInfo, InvertedDocConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException;
+ abstract void flush(Map<String, InvertedDocConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException;
abstract InvertedDocConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);

InvertedDocEndConsumer.java

@@ -21,7 +21,7 @@ import java.io.IOException;
import java.util.Map;
abstract class InvertedDocEndConsumer {
- abstract void flush(Map<FieldInfo, InvertedDocEndConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException;
+ abstract void flush(Map<String, InvertedDocEndConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException;
abstract void abort();
abstract InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
abstract void startDocument() throws IOException;

NormsConsumer.java

@@ -55,13 +55,13 @@ final class NormsConsumer extends InvertedDocEndConsumer {
/** Produce _X.nrm if any document had a field with norms
* not disabled */
@Override
- public void flush(Map<FieldInfo,InvertedDocEndConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException {
+ public void flush(Map<String,InvertedDocEndConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException {
boolean success = false;
boolean anythingFlushed = false;
try {
if (state.fieldInfos.hasNorms()) {
for (FieldInfo fi : state.fieldInfos) {
- final NormsConsumerPerField toWrite = (NormsConsumerPerField) fieldsToFlush.get(fi);
+ final NormsConsumerPerField toWrite = (NormsConsumerPerField) fieldsToFlush.get(fi.name);
// we must check the final value of omitNorms for the fieldinfo, it could have
// changed for this field since the first time we added it.
if (!fi.omitsNorms()) {
@@ -71,7 +71,7 @@ final class NormsConsumer extends InvertedDocEndConsumer {
assert fi.getNormType() == type;
} else if (fi.isIndexed()) {
anythingFlushed = true;
- assert fi.getNormType() == null;
+ assert fi.getNormType() == null: "got " + fi.getNormType() + "; field=" + fi.name;
}
}
}

SegmentMerger.java

@@ -199,7 +199,8 @@ final class SegmentMerger {
// NOTE: this is actually merging all the fieldinfos
public void mergeDocValuesAndNormsFieldInfos() throws IOException {
// mapping from all docvalues fields found to their promoted types
- // this is because FieldInfos does not store the valueSize
+ // this is because FieldInfos does not store the
+ // valueSize
Map<FieldInfo,TypePromoter> docValuesTypes = new HashMap<FieldInfo,TypePromoter>();
Map<FieldInfo,TypePromoter> normValuesTypes = new HashMap<FieldInfo,TypePromoter>();

TermVectorsConsumer.java

@@ -49,7 +49,7 @@ final class TermVectorsConsumer extends TermsHashConsumer {
}
@Override
- void flush(Map<FieldInfo, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
+ void flush(Map<String, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
if (writer != null) {
// At least one doc in this run had term vectors enabled
try {

TermsHash.java

@@ -96,17 +96,17 @@ final class TermsHash extends InvertedDocConsumer {
}
@Override
- void flush(Map<FieldInfo,InvertedDocConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
- Map<FieldInfo,TermsHashConsumerPerField> childFields = new HashMap<FieldInfo,TermsHashConsumerPerField>();
- Map<FieldInfo,InvertedDocConsumerPerField> nextChildFields;
+ void flush(Map<String,InvertedDocConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
+ Map<String,TermsHashConsumerPerField> childFields = new HashMap<String,TermsHashConsumerPerField>();
+ Map<String,InvertedDocConsumerPerField> nextChildFields;
if (nextTermsHash != null) {
- nextChildFields = new HashMap<FieldInfo,InvertedDocConsumerPerField>();
+ nextChildFields = new HashMap<String,InvertedDocConsumerPerField>();
} else {
nextChildFields = null;
}
- for (final Map.Entry<FieldInfo,InvertedDocConsumerPerField> entry : fieldsToFlush.entrySet()) {
+ for (final Map.Entry<String,InvertedDocConsumerPerField> entry : fieldsToFlush.entrySet()) {
TermsHashPerField perField = (TermsHashPerField) entry.getValue();
childFields.put(entry.getKey(), perField.consumer);
if (nextTermsHash != null) {

TermsHashConsumer.java

@@ -21,7 +21,7 @@ import java.io.IOException;
import java.util.Map;
abstract class TermsHashConsumer {
- abstract void flush(Map<FieldInfo, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException;
+ abstract void flush(Map<String, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException;
abstract void abort();
abstract void startDocument() throws IOException;
abstract void finishDocument(TermsHash termsHash) throws IOException;