LUCENE-1257: More generified APIs and implementations. Also makes BooleanQuery implement Iterable&lt;BooleanClause&gt;, enabling the extended for loop over its clauses. Thanks Kay Kay!

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@826213 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2009-10-17 09:53:02 +00:00
parent 58878dd20e
commit 975ce67fb5
13 changed files with 127 additions and 121 deletions
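
For illustration only (not part of this commit's diff), the Iterable change means callers can loop over a BooleanQuery's clauses directly with the extended for loop; the query construction below is a hypothetical sketch:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

public class BooleanQueryIterationSketch {
  public static void main(String[] args) {
    // Hypothetical query: one required and one optional clause
    BooleanQuery booleanQuery = new BooleanQuery();
    booleanQuery.add(new TermQuery(new Term("title", "lucene")), BooleanClause.Occur.MUST);
    booleanQuery.add(new TermQuery(new Term("body", "generics")), BooleanClause.Occur.SHOULD);

    // New with this commit: BooleanQuery implements Iterable<BooleanClause>
    for (BooleanClause clause : booleanQuery) {
      System.out.println(clause.getOccur() + " " + clause.getQuery());
    }
  }
}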

View File

@ -38,14 +38,14 @@ import org.apache.lucene.util.SortedVIntList;
public class BooleanFilter extends Filter
{
ArrayList shouldFilters = null;
ArrayList notFilters = null;
ArrayList mustFilters = null;
ArrayList<Filter> shouldFilters = null;
ArrayList<Filter> notFilters = null;
ArrayList<Filter> mustFilters = null;
private DocIdSetIterator getDISI(ArrayList filters, int index, IndexReader reader)
private DocIdSetIterator getDISI(ArrayList<Filter> filters, int index, IndexReader reader)
throws IOException
{
return ((Filter)filters.get(index)).getDocIdSet(reader).iterator();
return filters.get(index).getDocIdSet(reader).iterator();
}
/**
@ -78,7 +78,7 @@ public class BooleanFilter extends Filter
res = new OpenBitSetDISI(getDISI(notFilters, i, reader), reader.maxDoc());
res.flip(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
} else {
DocIdSet dis = ((Filter)notFilters.get(i)).getDocIdSet(reader);
DocIdSet dis = notFilters.get(i).getDocIdSet(reader);
if(dis instanceof OpenBitSet) {
// optimized case for OpenBitSets
res.andNot((OpenBitSet) dis);
@ -94,7 +94,7 @@ public class BooleanFilter extends Filter
if (res == null) {
res = new OpenBitSetDISI(getDISI(mustFilters, i, reader), reader.maxDoc());
} else {
DocIdSet dis = ((Filter)mustFilters.get(i)).getDocIdSet(reader);
DocIdSet dis = mustFilters.get(i).getDocIdSet(reader);
if(dis instanceof OpenBitSet) {
// optimized case for OpenBitSets
res.and((OpenBitSet) dis);
@ -132,25 +132,25 @@ public class BooleanFilter extends Filter
{
if (filterClause.getOccur().equals(Occur.MUST)) {
if (mustFilters==null) {
mustFilters=new ArrayList();
mustFilters=new ArrayList<Filter>();
}
mustFilters.add(filterClause.getFilter());
}
if (filterClause.getOccur().equals(Occur.SHOULD)) {
if (shouldFilters==null) {
shouldFilters=new ArrayList();
shouldFilters=new ArrayList<Filter>();
}
shouldFilters.add(filterClause.getFilter());
}
if (filterClause.getOccur().equals(Occur.MUST_NOT)) {
if (notFilters==null) {
notFilters=new ArrayList();
notFilters=new ArrayList<Filter>();
}
notFilters.add(filterClause.getFilter());
}
}
private boolean equalFilters(ArrayList filters1, ArrayList filters2)
private boolean equalFilters(ArrayList<Filter> filters1, ArrayList<Filter> filters2)
{
return (filters1 == filters2) ||
((filters1 != null) && filters1.equals(filters2));
@ -191,7 +191,7 @@ public class BooleanFilter extends Filter
return buffer.toString();
}
private void appendFilters(ArrayList filters, String occurString, StringBuilder buffer)
private void appendFilters(ArrayList<Filter> filters, String occurString, StringBuilder buffer)
{
if (filters != null) {
for (int i = 0; i < filters.size(); i++) {

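As a usage sketch (not part of the diff; the filters and field names are hypothetical, and the contrib classes are assumed to live in org.apache.lucene.search), a generified BooleanFilter is still built by adding FilterClauses, only now without raw-type warnings or casts inside the class:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanFilter;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.FilterClause;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;

public class BooleanFilterSketch {
  public static DocIdSet buildAndApply(IndexReader reader) throws IOException {
    BooleanFilter booleanFilter = new BooleanFilter();
    // Required: documents of type "book"
    booleanFilter.add(new FilterClause(
        new QueryWrapperFilter(new TermQuery(new Term("type", "book"))), BooleanClause.Occur.MUST));
    // Excluded: documents flagged as deleted
    booleanFilter.add(new FilterClause(
        new QueryWrapperFilter(new TermQuery(new Term("status", "deleted"))), BooleanClause.Occur.MUST_NOT));
    return booleanFilter.getDocIdSet(reader);
  }
}
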
View File

@ -26,8 +26,7 @@ import java.util.Map;
*/
public class NormalizeCharMap {
//Map<Character, NormalizeMap> submap;
Map submap;
Map<Character, NormalizeCharMap> submap;
String normStr;
int diff;
@ -44,9 +43,9 @@ public class NormalizeCharMap {
for(int i = 0; i < singleMatch.length(); i++) {
char c = singleMatch.charAt(i);
if (currMap.submap == null) {
currMap.submap = new HashMap(1);
currMap.submap = new HashMap<Character, NormalizeCharMap>(1);
}
NormalizeCharMap map = (NormalizeCharMap) currMap.submap.get(CharacterCache.valueOf(c));
NormalizeCharMap map = currMap.submap.get(CharacterCache.valueOf(c));
if (map == null) {
map = new NormalizeCharMap();
currMap.submap.put(new Character(c), map);

View File

@ -39,8 +39,8 @@ public class WordlistLoader {
* @param wordfile File containing the wordlist
* @return A HashSet with the file's words
*/
public static HashSet getWordSet(File wordfile) throws IOException {
HashSet result = new HashSet();
public static HashSet<String> getWordSet(File wordfile) throws IOException {
HashSet<String> result = new HashSet<String>();
FileReader reader = null;
try {
reader = new FileReader(wordfile);
@ -63,8 +63,8 @@ public class WordlistLoader {
* @param comment The comment string to ignore
* @return A HashSet with the file's words
*/
public static HashSet getWordSet(File wordfile, String comment) throws IOException {
HashSet result = new HashSet();
public static HashSet<String> getWordSet(File wordfile, String comment) throws IOException {
HashSet<String> result = new HashSet<String>();
FileReader reader = null;
try {
reader = new FileReader(wordfile);
@ -87,8 +87,8 @@ public class WordlistLoader {
* @param reader Reader containing the wordlist
* @return A HashSet with the reader's words
*/
public static HashSet getWordSet(Reader reader) throws IOException {
HashSet result = new HashSet();
public static HashSet<String> getWordSet(Reader reader) throws IOException {
HashSet<String> result = new HashSet<String>();
BufferedReader br = null;
try {
if (reader instanceof BufferedReader) {
@ -118,8 +118,8 @@ public class WordlistLoader {
* @param comment The string representing a comment.
* @return A HashSet with the reader's words
*/
public static HashSet getWordSet(Reader reader, String comment) throws IOException {
HashSet result = new HashSet();
public static HashSet<String> getWordSet(Reader reader, String comment) throws IOException {
HashSet<String> result = new HashSet<String>();
BufferedReader br = null;
try {
if (reader instanceof BufferedReader) {
@ -151,10 +151,10 @@ public class WordlistLoader {
* @return stem dictionary that overrules the stemming algorithm
* @throws IOException
*/
public static HashMap getStemDict(File wordstemfile) throws IOException {
public static HashMap<String, String> getStemDict(File wordstemfile) throws IOException {
if (wordstemfile == null)
throw new NullPointerException("wordstemfile may not be null");
HashMap result = new HashMap();
HashMap<String, String> result = new HashMap<String, String>();
BufferedReader br = null;
FileReader fr = null;
try {

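As a usage sketch (not part of the diff; the file name and comment marker are hypothetical), the generified WordlistLoader methods now return HashSet&lt;String&gt; directly, so callers need no casts:

import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import org.apache.lucene.analysis.WordlistLoader;

public class StopWordsSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical word list file; lines starting with "#" are treated as comments
    HashSet<String> stopWords = WordlistLoader.getWordSet(new File("stopwords.txt"), "#");
    for (String word : stopWords) {
      System.out.println(word);
    }
  }
}
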
View File

@ -20,9 +20,10 @@ package org.apache.lucene.index;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.lucene.search.Query;
/** Holds buffered deletes, by docID, term or query. We
* hold two instances of this class: one for the deletes
* prior to the last flush, the other for deletes after
@ -32,9 +33,9 @@ import java.util.Map.Entry;
* previously flushed segments. */
class BufferedDeletes {
int numTerms;
HashMap terms = new HashMap();
HashMap queries = new HashMap();
List docIDs = new ArrayList();
HashMap<Term,Num> terms = new HashMap<Term,Num>();
HashMap<Query,Integer> queries = new HashMap<Query,Integer>();
List<Integer> docIDs = new ArrayList<Integer>();
long bytesUsed;
// Number of documents a delete term applies to.
@ -103,42 +104,38 @@ class BufferedDeletes {
MergePolicy.OneMerge merge,
int mergeDocCount) {
final HashMap newDeleteTerms;
final HashMap<Term,Num> newDeleteTerms;
// Remap delete-by-term
if (terms.size() > 0) {
newDeleteTerms = new HashMap();
Iterator iter = terms.entrySet().iterator();
while(iter.hasNext()) {
Entry entry = (Entry) iter.next();
Num num = (Num) entry.getValue();
newDeleteTerms = new HashMap<Term, Num>();
for(Entry<Term,Num> entry : terms.entrySet()) {
Num num = entry.getValue();
newDeleteTerms.put(entry.getKey(),
new Num(mapper.remap(num.getNum())));
}
} else
} else
newDeleteTerms = null;
// Remap delete-by-docID
final List newDeleteDocIDs;
final List<Integer> newDeleteDocIDs;
if (docIDs.size() > 0) {
newDeleteDocIDs = new ArrayList(docIDs.size());
Iterator iter = docIDs.iterator();
while(iter.hasNext()) {
Integer num = (Integer) iter.next();
newDeleteDocIDs = new ArrayList<Integer>(docIDs.size());
for (Integer num : docIDs) {
newDeleteDocIDs.add(Integer.valueOf(mapper.remap(num.intValue())));
}
} else
} else
newDeleteDocIDs = null;
// Remap delete-by-query
final HashMap newDeleteQueries;
final HashMap<Query,Integer> newDeleteQueries;
if (queries.size() > 0) {
newDeleteQueries = new HashMap(queries.size());
Iterator iter = queries.entrySet().iterator();
while(iter.hasNext()) {
Entry entry = (Entry) iter.next();
newDeleteQueries = new HashMap<Query, Integer>(queries.size());
for(Entry<Query,Integer> entry: queries.entrySet()) {
Integer num = (Integer) entry.getValue();
newDeleteQueries.put(entry.getKey(),
Integer.valueOf(mapper.remap(num.intValue())));

View File

@ -83,13 +83,13 @@ public class CheckIndex {
/** Empty unless you passed specific segments list to check as optional 3rd argument.
* @see CheckIndex#checkIndex(List) */
public List/*<String>*/ segmentsChecked = new ArrayList();
public List<String> segmentsChecked = new ArrayList<String>();
/** True if the index was created with a newer version of Lucene than the CheckIndex tool. */
public boolean toolOutOfDate;
/** List of {@link SegmentInfoStatus} instances, detailing status of each segment. */
public List/*<SegmentInfoStatus*/ segmentInfos = new ArrayList();
public List<SegmentInfoStatus> segmentInfos = new ArrayList<SegmentInfoStatus>();
/** Directory index is in. */
public Directory dir;
@ -544,7 +544,7 @@ public class CheckIndex {
}
// Keeper
result.newSegments.add(info.clone());
result.newSegments.add((SegmentInfo) info.clone());
}
if (0 == result.numBadSegments) {

View File

@ -968,15 +968,13 @@ final class DocumentsWriter {
boolean any = false;
// Delete by term
Iterator iter = deletesFlushed.terms.entrySet().iterator();
TermDocs docs = reader.termDocs();
try {
while (iter.hasNext()) {
Entry entry = (Entry) iter.next();
Term term = (Term) entry.getKey();
for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
Term term = entry.getKey();
docs.seek(term);
int limit = ((BufferedDeletes.Num) entry.getValue()).getNum();
int limit = entry.getValue().getNum();
while (docs.next()) {
int docID = docs.doc();
if (docIDStart+docID >= limit)
@ -990,9 +988,8 @@ final class DocumentsWriter {
}
// Delete by docID
iter = deletesFlushed.docIDs.iterator();
while(iter.hasNext()) {
int docID = ((Integer) iter.next()).intValue();
for (Integer docIdInt : deletesFlushed.docIDs) {
int docID = docIdInt.intValue();
if (docID >= docIDStart && docID < docEnd) {
reader.deleteDocument(docID-docIDStart);
any = true;
@ -1001,11 +998,9 @@ final class DocumentsWriter {
// Delete by query
IndexSearcher searcher = new IndexSearcher(reader);
iter = deletesFlushed.queries.entrySet().iterator();
while(iter.hasNext()) {
Entry entry = (Entry) iter.next();
Query query = (Query) entry.getKey();
int limit = ((Integer) entry.getValue()).intValue();
for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
Query query = entry.getKey();
int limit = entry.getValue().intValue();
Weight weight = query.weight(searcher);
Scorer scorer = weight.scorer(reader, true, false);
if (scorer != null) {
@ -1027,7 +1022,7 @@ final class DocumentsWriter {
// delete term will be applied to those documents as well
// as the disk segments.
synchronized private void addDeleteTerm(Term term, int docCount) {
BufferedDeletes.Num num = (BufferedDeletes.Num) deletesInRAM.terms.get(term);
BufferedDeletes.Num num = deletesInRAM.terms.get(term);
final int docIDUpto = flushedDocCount + docCount;
if (num == null)
deletesInRAM.terms.put(term, new BufferedDeletes.Num(docIDUpto));

View File

@ -73,7 +73,7 @@ public final class SegmentInfo {
// and true for newly created merged segments (both
// compound and non compound).
private List files; // cached list of files that this segment uses
private List<String> files; // cached list of files that this segment uses
// in the Directory
long sizeInBytes = -1; // total byte size of all of our files (computed on demand)
@ -583,14 +583,14 @@ public final class SegmentInfo {
* modify it.
*/
public List files() throws IOException {
public List<String> files() throws IOException {
if (files != null) {
// Already cached:
return files;
}
files = new ArrayList();
files = new ArrayList<String>();
boolean useCompoundFile = getUseCompoundFile();

View File

@ -41,7 +41,7 @@ import java.util.Map;
* <p><b>NOTE:</b> This API is new and still experimental
* (subject to change suddenly in the next release)</p>
*/
public final class SegmentInfos extends Vector {
public final class SegmentInfos extends Vector<SegmentInfo> {
/** The file format version, a negative number. */
/* Works since counter, the old 1st entry, is always >= 0 */
@ -103,7 +103,7 @@ public final class SegmentInfos extends Vector {
// or wrote; this is normally the same as generation except if
// there was an IOException that had interrupted a commit
private Map userData = Collections.EMPTY_MAP; // Opaque Map<String, String> that user can specify during IndexWriter.commit
private Map<String,String> userData = Collections.<String,String>emptyMap(); // Opaque Map<String, String> that user can specify during IndexWriter.commit
/**
* If non-null, information about loading segments_N files
@ -269,10 +269,10 @@ public final class SegmentInfos extends Vector {
} else if (0 != input.readByte()) {
userData = Collections.singletonMap("userData", input.readString());
} else {
userData = Collections.EMPTY_MAP;
userData = Collections.<String,String>emptyMap();
}
} else {
userData = Collections.EMPTY_MAP;
userData = Collections.<String,String>emptyMap();
}
if (format <= FORMAT_CHECKSUM) {
@ -372,9 +372,9 @@ public final class SegmentInfos extends Vector {
public Object clone() {
SegmentInfos sis = (SegmentInfos) super.clone();
for(int i=0;i<sis.size();i++) {
sis.set(i, sis.info(i).clone());
sis.set(i, (SegmentInfo) sis.info(i).clone());
}
sis.userData = new HashMap(userData);
sis.userData = new HashMap<String, String>(userData);
return sis;
}
@ -435,7 +435,7 @@ public final class SegmentInfos extends Vector {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static Map readCurrentUserData(Directory directory)
public static Map<String,String> readCurrentUserData(Directory directory)
throws CorruptIndexException, IOException {
SegmentInfos sis = new SegmentInfos();
sis.read(directory);
@ -814,8 +814,8 @@ public final class SegmentInfos extends Vector {
* associated with any "external" segments are skipped).
* The returned collection is recomputed on each
* invocation. */
public Collection files(Directory dir, boolean includeSegmentsFile) throws IOException {
HashSet files = new HashSet();
public Collection<String> files(Directory dir, boolean includeSegmentsFile) throws IOException {
HashSet<String> files = new HashSet<String>();
if (includeSegmentsFile) {
files.add(getCurrentSegmentFileName());
}
@ -909,13 +909,13 @@ public final class SegmentInfos extends Vector {
return buffer.toString();
}
public Map getUserData() {
public Map<String,String> getUserData() {
return userData;
}
void setUserData(Map data) {
void setUserData(Map<String,String> data) {
if (data == null) {
userData = Collections.EMPTY_MAP;
userData = Collections.<String,String>emptyMap();
} else {
userData = data;
}

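As an illustrative sketch (the index location is hypothetical), readCurrentUserData now returns Map&lt;String,String&gt;, so the opaque commit user data can be read without casts:

import java.io.File;
import java.io.IOException;
import java.util.Map;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CommitUserDataSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical index directory
    Directory dir = FSDirectory.open(new File("/path/to/index"));
    Map<String,String> userData = SegmentInfos.readCurrentUserData(dir);
    for (Map.Entry<String,String> entry : userData.entrySet()) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }
  }
}
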
View File

@ -29,7 +29,7 @@ import java.util.*;
* queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
* BooleanQuerys.
*/
public class BooleanQuery extends Query {
public class BooleanQuery extends Query implements Iterable<BooleanClause> {
private static int maxClauseCount = 1024;
@ -62,7 +62,7 @@ public class BooleanQuery extends Query {
BooleanQuery.maxClauseCount = maxClauseCount;
}
private ArrayList clauses = new ArrayList();
private ArrayList<BooleanClause> clauses = new ArrayList<BooleanClause>();
private boolean disableCoord;
/** Constructs an empty boolean query. */
@ -158,11 +158,17 @@ public class BooleanQuery extends Query {
/** Returns the set of clauses in this query. */
public BooleanClause[] getClauses() {
return (BooleanClause[])clauses.toArray(new BooleanClause[clauses.size()]);
return clauses.toArray(new BooleanClause[clauses.size()]);
}
/** Returns the list of clauses in this query. */
public List clauses() { return clauses; }
public List<BooleanClause> clauses() { return clauses; }
/** Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
* make it possible to do:
* <pre>for (BooleanClause clause : booleanQuery) {}</pre>
*/
public final Iterator<BooleanClause> iterator() { return clauses().iterator(); }
/**
* Expert: the Weight for BooleanQuery, used to
@ -174,21 +180,25 @@ public class BooleanQuery extends Query {
protected class BooleanWeight extends Weight {
/** The Similarity implementation. */
protected Similarity similarity;
protected ArrayList weights;
protected ArrayList<Weight> weights;
public BooleanWeight(Searcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
weights = new ArrayList(clauses.size());
weights = new ArrayList<Weight>(clauses.size());
for (int i = 0 ; i < clauses.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i);
weights.add(c.getQuery().createWeight(searcher));
}
}
@Override
public Query getQuery() { return BooleanQuery.this; }
@Override
public float getValue() { return getBoost(); }
@Override
public float sumOfSquaredWeights() throws IOException {
float sum = 0.0f;
for (int i = 0 ; i < weights.size(); i++) {
@ -207,15 +217,16 @@ public class BooleanQuery extends Query {
}
@Override
public void normalize(float norm) {
norm *= getBoost(); // incorporate boost
for (Iterator iter = weights.iterator(); iter.hasNext();) {
Weight w = (Weight) iter.next();
for (Weight w : weights) {
// normalize all clauses, (even if prohibited in case of side affects)
w.normalize(norm);
}
}
@Override
public Explanation explain(IndexReader reader, int doc)
throws IOException {
final int minShouldMatch =
@ -227,9 +238,10 @@ public class BooleanQuery extends Query {
float sum = 0.0f;
boolean fail = false;
int shouldMatchCount = 0;
for (Iterator wIter = weights.iterator(), cIter = clauses.iterator(); wIter.hasNext();) {
Weight w = (Weight) wIter.next();
BooleanClause c = (BooleanClause) cIter.next();
Iterator<BooleanClause> cIter = clauses.iterator();
for (Iterator<Weight> wIter = weights.iterator(); wIter.hasNext();) {
Weight w = wIter.next();
BooleanClause c = cIter.next();
if (w.scorer(reader, true, true) == null) {
continue;
}
@ -287,14 +299,15 @@ public class BooleanQuery extends Query {
}
}
@Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
throws IOException {
List required = new ArrayList();
List prohibited = new ArrayList();
List optional = new ArrayList();
for (Iterator wIter = weights.iterator(), cIter = clauses.iterator(); wIter.hasNext();) {
Weight w = (Weight) wIter.next();
BooleanClause c = (BooleanClause) cIter.next();
List<Scorer> required = new ArrayList<Scorer>();
List<Scorer> prohibited = new ArrayList<Scorer>();
List<Scorer> optional = new ArrayList<Scorer>();
Iterator<BooleanClause> cIter = clauses.iterator();
for (Weight w : weights) {
BooleanClause c = cIter.next();
Scorer subScorer = w.scorer(reader, true, false);
if (subScorer == null) {
if (c.isRequired()) {
@ -328,10 +341,10 @@ public class BooleanQuery extends Query {
return new BooleanScorer2(similarity, minNrShouldMatch, required, prohibited, optional);
}
@Override
public boolean scoresDocsOutOfOrder() {
int numProhibited = 0;
for (Iterator cIter = clauses.iterator(); cIter.hasNext();) {
BooleanClause c = (BooleanClause) cIter.next();
for (BooleanClause c : clauses) {
if (c.isRequired()) {
return false; // BS2 (in-order) will be used by scorer()
} else if (c.isProhibited()) {
@ -349,10 +362,12 @@ public class BooleanQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
return new BooleanWeight(searcher);
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (minNrShouldMatch == 0 && clauses.size() == 1) { // optimize 1-clause queries
BooleanClause c = (BooleanClause)clauses.get(0);
@ -372,7 +387,7 @@ public class BooleanQuery extends Query {
BooleanQuery clone = null; // recursively rewrite
for (int i = 0 ; i < clauses.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i);
BooleanClause c = clauses.get(i);
Query query = c.getQuery().rewrite(reader);
if (query != c.getQuery()) { // clause rewrote: must clone
if (clone == null)
@ -387,20 +402,22 @@ public class BooleanQuery extends Query {
}
// inherit javadoc
@Override
public void extractTerms(Set<Term> terms) {
for (Iterator i = clauses.iterator(); i.hasNext();) {
BooleanClause clause = (BooleanClause) i.next();
for (BooleanClause clause : clauses) {
clause.getQuery().extractTerms(terms);
}
}
@Override
public Object clone() {
BooleanQuery clone = (BooleanQuery)super.clone();
clone.clauses = (ArrayList)this.clauses.clone();
clone.clauses = (ArrayList<BooleanClause>)this.clauses.clone();
return clone;
}
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
boolean needParens=(getBoost() != 1.0) || (getMinimumNumberShouldMatch()>0) ;
@ -409,7 +426,7 @@ public class BooleanQuery extends Query {
}
for (int i = 0 ; i < clauses.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i);
BooleanClause c = clauses.get(i);
if (c.isProhibited())
buffer.append("-");
else if (c.isRequired())
@ -450,6 +467,7 @@ public class BooleanQuery extends Query {
}
/** Returns true iff <code>o</code> is equal to this. */
@Override
public boolean equals(Object o) {
if (!(o instanceof BooleanQuery))
return false;
@ -460,6 +478,7 @@ public class BooleanQuery extends Query {
}
/** Returns a hash code value for this object.*/
@Override
public int hashCode() {
return Float.floatToIntBits(getBoost()) ^ clauses.hashCode()
+ getMinimumNumberShouldMatch();

View File

@ -18,7 +18,6 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.index.IndexReader;
@ -182,13 +181,12 @@ final class BooleanScorer extends Scorer {
private int doc = -1;
BooleanScorer(Similarity similarity, int minNrShouldMatch,
List optionalScorers, List prohibitedScorers) throws IOException {
List<Scorer> optionalScorers, List<Scorer> prohibitedScorers) throws IOException {
super(similarity);
this.minNrShouldMatch = minNrShouldMatch;
if (optionalScorers != null && optionalScorers.size() > 0) {
for (Iterator si = optionalScorers.iterator(); si.hasNext();) {
Scorer scorer = (Scorer) si.next();
for (Scorer scorer : optionalScorers) {
maxCoord++;
if (scorer.nextDoc() != NO_MORE_DOCS) {
scorers = new SubScorer(scorer, false, false, bucketTable.newCollector(0), scorers);
@ -197,8 +195,7 @@ final class BooleanScorer extends Scorer {
}
if (prohibitedScorers != null && prohibitedScorers.size() > 0) {
for (Iterator si = prohibitedScorers.iterator(); si.hasNext();) {
Scorer scorer = (Scorer) si.next();
for (Scorer scorer : prohibitedScorers) {
int mask = nextMask;
nextMask = nextMask << 1;
prohibitedMask |= mask; // update prohibited mask

View File

@ -31,9 +31,9 @@ import java.util.List;
*/
class BooleanScorer2 extends Scorer {
private final List requiredScorers;
private final List optionalScorers;
private final List prohibitedScorers;
private final List<Scorer> requiredScorers;
private final List<Scorer> optionalScorers;
private final List<Scorer> prohibitedScorers;
private class Coordinator {
float[] coordFactors = null;
@ -81,7 +81,7 @@ class BooleanScorer2 extends Scorer {
* the list of optional scorers.
*/
public BooleanScorer2(Similarity similarity, int minNrShouldMatch,
List required, List prohibited, List optional) throws IOException {
List<Scorer> required, List<Scorer> prohibited, List<Scorer> optional) throws IOException {
super(similarity);
if (minNrShouldMatch < 0) {
throw new IllegalArgumentException("Minimum number of optional scorers should not be negative");
@ -138,7 +138,7 @@ class BooleanScorer2 extends Scorer {
}
}
private Scorer countingDisjunctionSumScorer(final List scorers,
private Scorer countingDisjunctionSumScorer(final List<Scorer> scorers,
int minNrShouldMatch) throws IOException {
// each scorer from the list counted as a single matcher
return new DisjunctionSumScorer(scorers, minNrShouldMatch) {
@ -162,7 +162,7 @@ class BooleanScorer2 extends Scorer {
private static final Similarity defaultSimilarity = Similarity.getDefault();
private Scorer countingConjunctionSumScorer(List requiredScorers) throws IOException {
private Scorer countingConjunctionSumScorer(List<Scorer> requiredScorers) throws IOException {
// each scorer from the list counted as a single matcher
final int requiredNrMatchers = requiredScorers.size();
return new ConjunctionScorer(defaultSimilarity, requiredScorers) {
@ -220,7 +220,7 @@ class BooleanScorer2 extends Scorer {
private Scorer makeCountingSumScorerSomeReq() throws IOException { // At least one required scorer.
if (optionalScorers.size() == minNrShouldMatch) { // all optional scorers also required.
ArrayList allReq = new ArrayList(requiredScorers);
ArrayList<Scorer> allReq = new ArrayList<Scorer>(requiredScorers);
allReq.addAll(optionalScorers);
return addProhibitedScorers(countingConjunctionSumScorer(allReq));
} else { // optionalScorers.size() > minNrShouldMatch, and at least one required scorer

View File

@ -217,7 +217,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
@Override
public Object clone() {
DisjunctionMaxQuery clone = (DisjunctionMaxQuery)super.clone();
clone.disjuncts = (ArrayList)this.disjuncts.clone();
clone.disjuncts = (ArrayList<Query>)this.disjuncts.clone();
return clone;
}

View File

@ -228,9 +228,8 @@ public abstract class IndexInput implements Cloneable {
return clone;
}
// returns Map<String, String>
public Map readStringStringMap() throws IOException {
final Map map = new HashMap();
public Map<String,String> readStringStringMap() throws IOException {
final Map<String,String> map = new HashMap<String,String>();
final int count = readInt();
for(int i=0;i<count;i++) {
final String key = readString();