LUCENE-2815: fix MultiFields thread safety, don't cache null terms(field)

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1050415 13f79535-47bb-0310-9956-ffa450edef68
Yonik Seeley 2010-12-17 15:15:01 +00:00
parent b7a25aeab6
commit 2280dde754
2 changed files with 25 additions and 24 deletions
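In short: MultiFields.terms(String) lazily caches a merged Terms instance per field, but the cache was a plain HashMap mutated without synchronization while other threads read it, and it also stored a null entry for every field name that turned out not to exist. The sketch below is a condensed, hypothetical illustration of that old pattern, not the actual Lucene source; the class and method names are invented. It only marks the two problems the commit message calls out.

import java.util.HashMap;
import java.util.Map;

// Hypothetical, simplified version of the old caching scheme (not the actual Lucene code).
// Two problems, matching the commit message:
//  1) the HashMap is mutated with no synchronization while other threads read it, so
//     concurrent lookups can see an inconsistent or corrupted map;
//  2) a null result is cached for every missing field name that is ever asked for,
//     so the map can grow without bound.
class UnsafeFieldCache {
  private final Map<String,Object> cache = new HashMap<String,Object>();

  Object get(String field) {
    if (!cache.containsKey(field)) {          // check ...
      Object value = computeOrNull(field);    // ... then act: racy across threads
      cache.put(field, value);                // nulls are cached too
    }
    return cache.get(field);
  }

  private Object computeOrNull(String field) {
    return field.startsWith("known") ? new Object() : null;  // stand-in for the real lookup
  }
}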


@@ -1420,7 +1420,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   }
 
-  private Fields fields;
+  private volatile Fields fields;
 
   /** @lucene.internal */
   void storeFields(Fields fields) {
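The IndexReader change makes the lazily-stored Fields reference volatile so that a Fields instance built and stored by one thread is safely published to threads that read it later. The class below is a minimal, hypothetical sketch of that publication pattern under the Java memory model; it is not the actual IndexReader code and the names are made up.

// Hypothetical illustration of why the field becomes volatile.
// Without volatile, the write performed by the thread that builds and stores the object is
// not guaranteed to become visible to other threads; with volatile, a thread that reads a
// non-null reference also sees all writes made before the reference was stored.
class LazyHolder {
  private volatile Object fields;   // analogous to "private volatile Fields fields"

  void store(Object built) {        // analogous to storeFields(...)
    fields = built;                 // volatile write: safely publishes the object
  }

  Object retrieve() {
    return fields;                  // volatile read: sees the latest published value or null
  }
}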


@@ -19,9 +19,10 @@ package org.apache.lucene.index;
 import java.io.IOException;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.List;
 import java.util.ArrayList;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.ReaderUtil.Gather; // for javadocs
 import org.apache.lucene.util.Bits;
@@ -45,7 +46,7 @@ import org.apache.lucene.util.BytesRef;
 public final class MultiFields extends Fields {
   private final Fields[] subs;
   private final ReaderUtil.Slice[] subSlices;
-  private final Map<String,Terms> terms = new HashMap<String,Terms>();
+  private final Map<String,Terms> terms = new ConcurrentHashMap<String,Terms>();
 
   /** Returns a single {@link Fields} instance for this
    * reader, merging fields/terms/docs/positions on the
@@ -240,32 +241,32 @@ public final class MultiFields extends Fields {
 
   @Override
   public Terms terms(String field) throws IOException {
-    final Terms result;
+    Terms result = terms.get(field);
+    if (result != null)
+      return result;
 
-    if (!terms.containsKey(field)) {
-      // Lazy init: first time this field is requested, we
-      // create & add to terms:
-      final List<Terms> subs2 = new ArrayList<Terms>();
-      final List<ReaderUtil.Slice> slices2 = new ArrayList<ReaderUtil.Slice>();
+    // Lazy init: first time this field is requested, we
+    // create & add to terms:
+    final List<Terms> subs2 = new ArrayList<Terms>();
+    final List<ReaderUtil.Slice> slices2 = new ArrayList<ReaderUtil.Slice>();
 
-      // Gather all sub-readers that share this field
-      for(int i=0;i<subs.length;i++) {
-        final Terms terms = subs[i].terms(field);
-        if (terms != null) {
-          subs2.add(terms);
-          slices2.add(subSlices[i]);
-        }
+    // Gather all sub-readers that share this field
+    for(int i=0;i<subs.length;i++) {
+      final Terms terms = subs[i].terms(field);
+      if (terms != null) {
+        subs2.add(terms);
+        slices2.add(subSlices[i]);
      }
-      if (subs2.size() == 0) {
-        result = null;
-      } else {
-        result = new MultiTerms(subs2.toArray(Terms.EMPTY_ARRAY),
-            slices2.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
-      }
-      terms.put(field, result);
+    }
+    if (subs2.size() == 0) {
+      result = null;
+      // don't cache this case with an unbounded cache, since the number of fields that don't exist
+      // is unbounded.
     } else {
-      result = terms.get(field);
+      result = new MultiTerms(subs2.toArray(Terms.EMPTY_ARRAY),
+          slices2.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
+      terms.put(field, result);
     }
 
     return result;
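The rewritten terms(String field) reads the ConcurrentHashMap first, returns a hit immediately, rebuilds the merged MultiTerms on a miss, and only caches non-null results, so lookups of fields that do not exist never add entries. Two concurrent callers can occasionally rebuild the same Terms and both put it; the last write wins and this duplication is accepted as harmless. The following is a condensed, hypothetical sketch of that pattern with a small concurrent driver; the names are invented and this is not Lucene's API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical, condensed version of the new caching scheme plus a small concurrent driver.
// get() reads the ConcurrentHashMap first, recomputes on a miss, and caches only non-null
// results, so repeated lookups of nonexistent fields never grow the map.
class SafeFieldCache {
  private final Map<String,Object> cache = new ConcurrentHashMap<String,Object>();

  Object get(String field) {
    Object result = cache.get(field);
    if (result != null)
      return result;
    result = computeOrNull(field);     // may be recomputed by several threads; that's harmless
    if (result != null)
      cache.put(field, result);        // nulls are never cached
    return result;
  }

  private Object computeOrNull(String field) {
    return field.startsWith("known") ? new Object() : null;  // stand-in for the real merge
  }

  public static void main(String[] args) throws InterruptedException {
    final SafeFieldCache fieldCache = new SafeFieldCache();
    ExecutorService pool = Executors.newFixedThreadPool(8);
    for (int i = 0; i < 8; i++) {
      pool.execute(new Runnable() {
        public void run() {
          for (int j = 0; j < 100000; j++) {
            fieldCache.get("known_body");   // cached after the first successful build
            fieldCache.get("missing_" + j); // never cached, so the map stays small
          }
        }
      });
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
  }
}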