LUCENE-1369: switch from Hashtable to HashMap and from Vector to List, when possible

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@692921 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2008-09-07 19:22:40 +00:00
parent 6242cb3322
commit d5a40278bc
34 changed files with 489 additions and 287 deletions

View File

@ -17,6 +17,13 @@ package org.apache.lucene.analysis.br;
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
@ -24,12 +31,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Hashtable;
import java.util.HashSet;
import java.util.Set;
/**
* Analyzer for Brazilian language. Supports an external list of stopwords (words that
@ -92,7 +93,7 @@ public final class BrazilianAnalyzer extends Analyzer {
/**
* Builds an analyzer with the given stop words.
*/
public BrazilianAnalyzer( Hashtable stopwords ) {
public BrazilianAnalyzer( Map stopwords ) {
stoptable = new HashSet(stopwords.keySet());
}
@ -112,7 +113,7 @@ public final class BrazilianAnalyzer extends Analyzer {
/**
* Builds an exclusionlist from a Map.
*/
public void setStemExclusionTable( Hashtable exclusionlist ) {
public void setStemExclusionTable( Map exclusionlist ) {
excltable = new HashSet(exclusionlist.keySet());
}
/**

View File

@ -23,7 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
import java.io.IOException;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Set;
/**

View File

@ -17,7 +17,8 @@ package org.apache.lucene.analysis.cn;
* limitations under the License.
*/
import java.util.Hashtable;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
@ -54,12 +55,12 @@ public final class ChineseFilter extends TokenFilter {
};
private Hashtable stopTable;
private Map stopTable;
public ChineseFilter(TokenStream in) {
super(in);
stopTable = new Hashtable(STOP_WORDS.length);
stopTable = new HashMap(STOP_WORDS.length);
for (int i = 0; i < STOP_WORDS.length; i++)
stopTable.put(STOP_WORDS[i], STOP_WORDS[i]);
}

View File

@ -26,7 +26,6 @@ import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.*;
import java.util.Hashtable;
import java.util.HashSet;
import java.util.Set;

View File

@ -22,7 +22,7 @@ import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
@ -91,7 +91,7 @@ public class GermanAnalyzer extends Analyzer {
/**
* Builds an analyzer with the given stop words.
*/
public GermanAnalyzer(Hashtable stopwords) {
public GermanAnalyzer(Map stopwords) {
stopSet = new HashSet(stopwords.keySet());
}
@ -112,7 +112,7 @@ public class GermanAnalyzer extends Analyzer {
/**
* Builds an exclusionlist from a Map.
*/
public void setStemExclusionTable(Hashtable exclusionlist) {
public void setStemExclusionTable(Map exclusionlist) {
exclusionSet = new HashSet(exclusionlist.keySet());
}

View File

@ -16,6 +16,7 @@ package org.apache.lucene.analysis.el;
* limitations under the License.
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
@ -23,7 +24,7 @@ import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.Reader;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Map;
import java.util.Set;
/**
@ -200,7 +201,7 @@ public final class GreekAnalyzer extends Analyzer
/**
* Builds an analyzer with the given stop words.
*/
public GreekAnalyzer(char[] charset, Hashtable stopwords)
public GreekAnalyzer(char[] charset, Map stopwords)
{
this.charset = charset;
stopSet = new HashSet(stopwords.keySet());

View File

@ -29,7 +29,7 @@ import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Map;
import java.util.Set;
/**
@ -111,9 +111,9 @@ public final class FrenchAnalyzer extends Analyzer {
}
/**
* Builds an exclusionlist from a Hashtable.
* Builds an exclusionlist from a Map.
*/
public void setStemExclusionTable(Hashtable exclusionlist) {
public void setStemExclusionTable(Map exclusionlist) {
excltable = new HashSet(exclusionlist.keySet());
}

View File

@ -20,9 +20,10 @@ package org.apache.lucene.analysis.fr;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import java.io.IOException;
import java.util.Hashtable;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
@ -83,7 +84,7 @@ public final class FrenchStemFilter extends TokenFilter {
/**
* Set an alternative exclusion list for this filter.
*/
public void setExclusionTable( Hashtable exclusiontable ) {
public void setExclusionTable( Map exclusiontable ) {
exclusions = new HashSet(exclusiontable.keySet());
}
}

View File

@ -19,7 +19,7 @@ package org.apache.lucene.analysis.ru;
import java.io.Reader;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
@ -237,7 +237,7 @@ public final class RussianAnalyzer extends Analyzer
* Builds an analyzer with the given stop words.
* @todo create a Set version of this ctor
*/
public RussianAnalyzer(char[] charset, Hashtable stopwords)
public RussianAnalyzer(char[] charset, Map stopwords)
{
this.charset = charset;
stopSet = new HashSet(stopwords.keySet());

View File

@ -57,16 +57,17 @@ package lucli;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.Hashtable;
import java.util.Vector;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.Set;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.Map.Entry;
import jline.ConsoleReader;
@ -91,15 +92,15 @@ import org.apache.lucene.search.Searcher;
/**
* Various methods that interact with Lucene and provide info about the
* index, search, etc. Parts addapted from Lucene demo.
* index, search, etc. Parts adapted from Lucene demo.
*/
class LuceneMethods {
private int numDocs;
private String indexName; //directory of this index
private java.util.Iterator fieldIterator;
private Vector fields; //Fields as a vector
private Vector indexedFields; //Fields as a vector
private List fields; //Fields as a vector
private List indexedFields; //Fields as a vector
private String fieldsArray[]; //Fields as an array
private Searcher searcher;
private Query query; //current query string
@ -247,8 +248,8 @@ class LuceneMethods {
private void getFieldInfo() throws IOException {
IndexReader indexReader = IndexReader.open(indexName);
fields = new Vector();
indexedFields = new Vector();
fields = new ArrayList();
indexedFields = new ArrayList();
//get the list of all field names
fieldIterator = indexReader.getFieldNames(FieldOption.ALL).iterator();
@ -274,14 +275,14 @@ class LuceneMethods {
private void invertDocument(Document doc)
throws IOException {
Hashtable tokenHash = new Hashtable();
Map tokenMap = new HashMap();
final int maxFieldLength = 10000;
Analyzer analyzer = new StandardAnalyzer();
Enumeration fields = doc.fields();
Iterator fields = doc.getFields().iterator();
final Token reusableToken = new Token();
while (fields.hasMoreElements()) {
Field field = (Field) fields.nextElement();
while (fields.hasNext()) {
Field field = (Field) fields.next();
String fieldName = field.name();
@ -304,12 +305,12 @@ class LuceneMethods {
position += (nextToken.getPositionIncrement() - 1);
position++;
String name = nextToken.term();
Integer Count = (Integer) tokenHash.get(name);
Integer Count = (Integer) tokenMap.get(name);
if (Count == null) { // not in there yet
tokenHash.put(name, new Integer(1)); //first one
tokenMap.put(name, new Integer(1)); //first one
} else {
int count = Count.intValue();
tokenHash.put(name, new Integer(count + 1));
tokenMap.put(name, new Integer(count + 1));
}
if (position > maxFieldLength) break;
}
@ -320,7 +321,7 @@ class LuceneMethods {
}
}
Entry[] sortedHash = getSortedHashtableEntries(tokenHash);
Entry[] sortedHash = getSortedMapEntries(tokenMap);
for (int ii = 0; ii < sortedHash.length && ii < 10; ii++) {
Entry currentEntry = sortedHash[ii];
message((ii + 1) + ":" + currentEntry.getKey() + " " + currentEntry.getValue());
@ -353,17 +354,16 @@ class LuceneMethods {
indexReader.close();
}
/** Sort Hashtable values
* @param h the hashtable we're sorting
/** Sort Map values
* @param m the map we're sorting
* from http://developer.java.sun.com/developer/qow/archive/170/index.jsp
*/
public static Entry[]
getSortedHashtableEntries(Hashtable h) {
Set set = h.entrySet();
getSortedMapEntries(Map m) {
Set set = m.entrySet();
Entry[] entries =
(Entry[]) set.toArray(
new Entry[set.size()]);
new Entry[set.size()]);
Arrays.sort(entries, new Comparator() {
public int compare(Object o1, Object o2) {
Object v1 = ((Entry) o1).getValue();

View File

@ -28,8 +28,9 @@ import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import junit.framework.TestCase;
@ -350,7 +351,7 @@ public class MemoryIndexTest extends TestCase {
private String[] readLines(File file) throws Exception {
BufferedReader reader = new BufferedReader(new InputStreamReader(
new FileInputStream(file)));
ArrayList lines = new ArrayList();
List lines = new ArrayList();
String line;
while ((line = reader.readLine()) != null) {
String t = line.trim();
@ -373,9 +374,9 @@ public class MemoryIndexTest extends TestCase {
private MemoryIndex createMemoryIndex(Document doc) {
MemoryIndex index = new MemoryIndex();
Enumeration iter = doc.fields();
while (iter.hasMoreElements()) {
Field field = (Field) iter.nextElement();
Iterator iter = doc.getFields().iterator();
while (iter.hasNext()) {
Field field = (Field) iter.next();
index.addField(field.name(), field.stringValue(), analyzer);
}
return index;

View File

@ -252,13 +252,20 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
return locale;
}
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int modifier, Query q) {
addClause((List) clauses, conj, modifier, q);
}
protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@ -268,7 +275,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@ -293,11 +300,11 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
@ -310,7 +317,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector();
List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@ -325,7 +332,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
}
if (nextToken == null)
break;
v.addElement(nextToken.clone());
list.add(nextToken.clone());
if (nextToken.getPositionIncrement() == 1)
positionCount++;
else
@ -338,18 +345,18 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// ignore
}
if (v.size() == 0)
if (list.size() == 0)
return null;
else if (v.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery();
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@ -360,8 +367,8 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery();
List multiTerms = new ArrayList();
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) {
mpq.add((Term[])multiTerms.toArray(new Term[0]));
multiTerms.clear();
@ -375,10 +382,9 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
else {
PhraseQuery q = new PhraseQuery();
q.setSlop(phraseSlop);
for (int i = 0; i < v.size(); i++) {
for (int i = 0; i < list.size(); i++) {
q.add(new Term(field, ((org.apache.lucene.analysis.Token)
v.elementAt(i)).term()));
list.get(i)).term()));
}
return q;
}
@ -440,13 +446,32 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
{
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
protected Query getBooleanQuery(List clauses) throws ParseException
{
return getBooleanQuery(clauses, false);
}
@ -458,22 +483,42 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException {
if (clauses == null || clauses.size() == 0)
return null;
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i));
query.add((BooleanClause)clauses.get(i));
}
return query;
}
@ -675,7 +720,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
}
final public Query Query(String field) throws ParseException {
Vector clauses = new Vector();
List clauses = new ArrayList();
Query q, firstQuery=null;
boolean orPresent = false;
int modifier;
@ -727,7 +772,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
}
final public Query andExpression(String field) throws ParseException {
Vector clauses = new Vector();
List clauses = new ArrayList();
Query q, firstQuery=null;
int modifier;
q = Clause(field);

View File

@ -166,28 +166,28 @@ public class PrecedenceQueryParser {
throw new ParseException("Too many boolean clauses");
}
}
/**
* @return Returns the analyzer.
*/
public Analyzer getAnalyzer() {
return analyzer;
}
/**
* @return Returns the field.
*/
public String getField() {
return field;
}
/**
* Get the minimal similarity for fuzzy queries.
*/
public float getFuzzyMinSim() {
return fuzzyMinSim;
}
/**
* Set the minimum similarity for fuzzy queries.
* Default is 0.5f.
@ -195,7 +195,7 @@ public class PrecedenceQueryParser {
public void setFuzzyMinSim(float fuzzyMinSim) {
this.fuzzyMinSim = fuzzyMinSim;
}
/**
* Get the prefix length for fuzzy queries.
* @return Returns the fuzzyPrefixLength.
@ -203,7 +203,7 @@ public class PrecedenceQueryParser {
public int getFuzzyPrefixLength() {
return fuzzyPrefixLength;
}
/**
* Set the prefix length for fuzzy queries. Default is 0.
* @param fuzzyPrefixLength The fuzzyPrefixLength to set.
@ -276,13 +276,20 @@ public class PrecedenceQueryParser {
return locale;
}
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int modifier, Query q) {
addClause((List) clauses, conj, modifier, q);
}
protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@ -292,7 +299,7 @@ public class PrecedenceQueryParser {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@ -317,15 +324,15 @@ public class PrecedenceQueryParser {
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
/**
* @exception ParseException throw in overridden method to disallow
*/
@ -334,7 +341,7 @@ public class PrecedenceQueryParser {
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector();
List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@ -349,7 +356,7 @@ public class PrecedenceQueryParser {
}
if (nextToken == null)
break;
v.addElement(nextToken.clone());
list.add(nextToken.clone());
if (nextToken.getPositionIncrement() == 1)
positionCount++;
else
@ -362,18 +369,18 @@ public class PrecedenceQueryParser {
// ignore
}
if (v.size() == 0)
if (list.size() == 0)
return null;
else if (v.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery();
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@ -384,8 +391,8 @@ public class PrecedenceQueryParser {
// phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery();
List multiTerms = new ArrayList();
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) {
mpq.add((Term[])multiTerms.toArray(new Term[0]));
multiTerms.clear();
@ -399,16 +406,15 @@ public class PrecedenceQueryParser {
else {
PhraseQuery q = new PhraseQuery();
q.setSlop(phraseSlop);
for (int i = 0; i < v.size(); i++) {
q.add(new Term(field, ((org.apache.lucene.analysis.Token)
v.elementAt(i)).term()));
for (int i = 0; i < list.size(); i++) {
q.add(new Term(field, ((org.apache.lucene.analysis.Token)
list.get(i)).term()));
}
return q;
}
}
}
/**
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
@ -416,8 +422,8 @@ public class PrecedenceQueryParser {
*
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
@ -429,7 +435,7 @@ public class PrecedenceQueryParser {
return query;
}
/**
* @exception ParseException throw in overridden method to disallow
*/
@ -464,13 +470,32 @@ public class PrecedenceQueryParser {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
{
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
protected Query getBooleanQuery(List clauses) throws ParseException
{
return getBooleanQuery(clauses, false);
}
@ -482,22 +507,42 @@ public class PrecedenceQueryParser {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException {
if (clauses == null || clauses.size() == 0)
return null;
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i));
query.add((BooleanClause)clauses.get(i));
}
return query;
}
@ -563,7 +608,7 @@ public class PrecedenceQueryParser {
Term t = new Term(field, termStr);
return new PrefixQuery(t);
}
/**
* Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses
@ -733,7 +778,7 @@ int Modifier() : {
Query Query(String field) :
{
Vector clauses = new Vector();
List clauses = new ArrayList();
Query q, firstQuery=null;
boolean orPresent = false;
int modifier;
@ -760,7 +805,7 @@ Query Query(String field) :
Query andExpression(String field) :
{
Vector clauses = new Vector();
List clauses = new ArrayList();
Query q, firstQuery=null;
int modifier;
}

View File

@ -48,7 +48,7 @@ public abstract class ComposedQuery extends SrndQuery {
public boolean isOperatorInfix() { return operatorInfix; } /* else prefix operator */
public List makeLuceneSubQueriesField(String fn, BasicQueryFactory qf) {
ArrayList luceneSubQueries = new ArrayList();
List luceneSubQueries = new ArrayList();
Iterator sqi = getSubQueriesIterator();
while (sqi.hasNext()) {
luceneSubQueries.add( ((SrndQuery) sqi.next()).makeLuceneQueryField(fn, qf));

View File

@ -24,11 +24,11 @@ import org.apache.lucene.search.Query;
public class FieldsQuery extends SrndQuery { /* mostly untested */
private SrndQuery q;
private ArrayList fieldNames;
private List fieldNames;
private final char fieldOp;
private final String OrOperatorName = "OR"; /* for expanded queries, not normally visible */
public FieldsQuery(SrndQuery q, ArrayList fieldNames, char fieldOp) {
public FieldsQuery(SrndQuery q, List fieldNames, char fieldOp) {
this.q = q;
this.fieldNames = fieldNames;
this.fieldOp = fieldOp;
@ -49,7 +49,7 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */
if (fieldNames.size() == 1) { /* single field name: no new queries needed */
return q.makeLuceneQueryFieldNoBoost((String) fieldNames.get(0), qf);
} else { /* OR query over the fields */
ArrayList queries = new ArrayList();
List queries = new ArrayList();
Iterator fni = getFieldNames().listIterator();
SrndQuery qc;
while (fni.hasNext()) {

View File

@ -16,12 +16,14 @@ package org.apache.lucene.queryParser.surround.query;
* limitations under the License.
*/
import java.util.ArrayList;
import java.io.IOException;
import org.apache.lucene.index.Term;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;
public abstract class SimpleTerm
extends SrndQuery
@ -78,7 +80,7 @@ public abstract class SimpleTerm
}
public Query rewrite(IndexReader reader) throws IOException {
final ArrayList luceneSubQueries = new ArrayList();
final List luceneSubQueries = new ArrayList();
visitMatchingTerms( reader, fieldName,
new MatchingTermVisitor() {
public void visitMatchingTerm(Term term) throws IOException {

View File

@ -16,15 +16,17 @@ package org.apache.lucene.swing.models;
* limitations under the License.
*/
import javax.swing.*;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.swing.AbstractListModel;
/**
* @author Jonathan Simon - jonathan_s_simon@yahoo.com
*/
public class BaseListModel extends AbstractListModel {
private ArrayList data = new ArrayList();
private List data = new ArrayList();
public BaseListModel(Iterator iterator) {
while (iterator.hasNext()) {

View File

@ -16,16 +16,18 @@ package org.apache.lucene.swing.models;
* limitations under the License.
*/
import javax.swing.table.AbstractTableModel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.swing.table.AbstractTableModel;
/**
* @author Jonathan Simon - jonathan_s_simon@yahoo.com
*/
public class BaseTableModel extends AbstractTableModel {
private ArrayList columnNames = new ArrayList();
private ArrayList rows = new ArrayList();
private List columnNames = new ArrayList();
private List rows = new ArrayList();
public BaseTableModel(Iterator data) {
columnNames.add("Name");

View File

@ -17,6 +17,7 @@ package org.apache.lucene.swing.models;
*/
import java.util.ArrayList;
import java.util.List;
import javax.swing.ListModel;
@ -28,7 +29,7 @@ import junit.framework.TestCase;
public class TestBasicList extends TestCase {
private ListModel baseListModel;
private ListSearcher listSearcher;
private ArrayList list;
private List list;
protected void setUp() throws Exception {
list = new ArrayList();

View File

@ -17,6 +17,7 @@ package org.apache.lucene.swing.models;
*/
import java.util.ArrayList;
import java.util.List;
import javax.swing.table.TableModel;
@ -28,7 +29,7 @@ import junit.framework.TestCase;
public class TestBasicTable extends TestCase {
private TableModel baseTableModel;
private TableSearcher tableSearcher;
private ArrayList list;
private List list;
protected void setUp() throws Exception {
list = new ArrayList();

View File

@ -17,10 +17,11 @@ package org.apache.lucene.demo.html;
* limitations under the License.
*/
import java.util.*;
import java.util.HashMap;
import java.util.Map;
public class Entities {
static final Hashtable decoder = new Hashtable(300);
static final Map decoder = new HashMap(300);
static final String[] encoder = new String[0x100];
static final String decode(String entity) {

View File

@ -2952,7 +2952,7 @@ public class IndexWriter {
final SegmentInfo info = sis.info(j);
docCount += info.docCount;
assert !segmentInfos.contains(info);
segmentInfos.addElement(info); // add each info
segmentInfos.add(info); // add each info
}
}
}
@ -3077,7 +3077,7 @@ public class IndexWriter {
SegmentInfo info = sis.info(j);
assert !segmentInfos.contains(info): "dup info dir=" + info.dir + " name=" + info.name;
docCount += info.docCount;
segmentInfos.addElement(info); // add each info
segmentInfos.add(info); // add each info
}
}
}
@ -3287,10 +3287,10 @@ public class IndexWriter {
}
synchronized(this) {
segmentInfos.setSize(0); // pop old infos & add new
segmentInfos.clear(); // pop old infos & add new
info = new SegmentInfo(mergedName, docCount, directory, false, true,
-1, null, false, merger.hasProx());
segmentInfos.addElement(info);
segmentInfos.add(info);
}
// Notify DocumentsWriter that the flushed count just increased
@ -3650,7 +3650,7 @@ public class IndexWriter {
docWriter.pushDeletes();
if (flushDocs)
segmentInfos.addElement(newSegment);
segmentInfos.add(newSegment);
if (flushDeletes) {
flushDeletesCount++;

View File

@ -17,13 +17,14 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import java.io.IOException;
import java.util.Collection;
import java.util.Hashtable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
@ -36,7 +37,7 @@ public class MultiReader extends IndexReader {
protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close
private Hashtable normsCache = new Hashtable();
private Map normsCache = new HashMap();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
@ -288,7 +289,9 @@ public class MultiReader extends IndexReader {
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
normsCache.remove(field); // clear cache
synchronized (normsCache) {
normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}

View File

@ -17,26 +17,26 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.store.Directory;
/**
* An IndexReader which reads indexes with multiple segments.
*/
class MultiSegmentReader extends DirectoryIndexReader {
protected SegmentReader[] subReaders;
private int[] starts; // 1st docno for each segment
private Hashtable normsCache = new Hashtable();
private Map normsCache = new HashMap();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
@ -149,17 +149,18 @@ class MultiSegmentReader extends DirectoryIndexReader {
// try to copy unchanged norms from the old normsCache to the new one
if (oldNormsCache != null) {
Iterator it = oldNormsCache.keySet().iterator();
Iterator it = oldNormsCache.entrySet().iterator();
while (it.hasNext()) {
String field = (String) it.next();
Map.Entry entry = (Map.Entry) it.next();
String field = (String) entry.getKey();
if (!hasNorms(field)) {
continue;
}
byte[] oldBytes = (byte[]) oldNormsCache.get(field);
byte[] oldBytes = (byte[]) entry.getValue();
byte[] bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++) {
Integer oldReaderIndex = ((Integer) segmentReaders.get(subReaders[i].getSegmentName()));
@ -175,7 +176,7 @@ class MultiSegmentReader extends DirectoryIndexReader {
subReaders[i].norms(field, bytes, starts[i]);
}
}
normsCache.put(field, bytes); // update cache
}
}
@ -353,7 +354,9 @@ class MultiSegmentReader extends DirectoryIndexReader {
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
normsCache.remove(field); // clear cache
synchronized (normsCache) {
normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}

View File

@ -91,7 +91,7 @@ final class SegmentInfos extends Vector {
private static PrintStream infoStream;
public final SegmentInfo info(int i) {
return (SegmentInfo) elementAt(i);
return (SegmentInfo) get(i);
}
/**
@ -231,7 +231,7 @@ final class SegmentInfos extends Vector {
}
for (int i = input.readInt(); i > 0; i--) { // read segmentInfos
addElement(new SegmentInfo(directory, format, input));
add(new SegmentInfo(directory, format, input));
}
if(format >= 0){ // in old format the version number may be at the end of the file
@ -337,7 +337,7 @@ final class SegmentInfos extends Vector {
public Object clone() {
SegmentInfos sis = (SegmentInfos) super.clone();
for(int i=0;i<sis.size();i++) {
sis.setElementAt(sis.info(i).clone(), i);
sis.set(i, sis.info(i).clone());
}
return sis;
}

View File

@ -17,17 +17,18 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.util.Vector;
import java.util.Iterator;
import java.util.Collection;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/**
* The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
@ -49,7 +50,7 @@ final class SegmentMerger {
private String segment;
private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
private Vector readers = new Vector();
private List readers = new ArrayList();
private FieldInfos fieldInfos;
private int mergedDocs;
@ -93,7 +94,7 @@ final class SegmentMerger {
* @param reader
*/
final void add(IndexReader reader) {
readers.addElement(reader);
readers.add(reader);
}
/**
@ -102,7 +103,7 @@ final class SegmentMerger {
* @return The ith reader to be merged
*/
final IndexReader segmentReader(int i) {
return (IndexReader) readers.elementAt(i);
return (IndexReader) readers.get(i);
}
/**
@ -152,18 +153,18 @@ final class SegmentMerger {
*/
final void closeReaders() throws IOException {
for (int i = 0; i < readers.size(); i++) { // close readers
IndexReader reader = (IndexReader) readers.elementAt(i);
IndexReader reader = (IndexReader) readers.get(i);
reader.close();
}
}
final Vector createCompoundFile(String fileName)
final List createCompoundFile(String fileName)
throws IOException {
CompoundFileWriter cfsWriter =
new CompoundFileWriter(directory, fileName, checkAbort);
Vector files =
new Vector(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
List files =
new ArrayList(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
// Basic files
for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.length; i++) {
@ -229,7 +230,7 @@ final class SegmentMerger {
// FieldInfos, then we can do a bulk copy of the
// stored fields:
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = (IndexReader) readers.elementAt(i);
IndexReader reader = (IndexReader) readers.get(i);
if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader;
boolean same = true;
@ -261,14 +262,14 @@ final class SegmentMerger {
// name -> number mapping are the same. So, we start
// with the fieldInfos of the last segment in this
// case, to keep that numbering.
final SegmentReader sr = (SegmentReader) readers.elementAt(readers.size()-1);
final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1);
fieldInfos = (FieldInfos) sr.fieldInfos.clone();
} else {
fieldInfos = new FieldInfos(); // merge field names
}
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = (IndexReader) readers.elementAt(i);
IndexReader reader = (IndexReader) readers.get(i);
if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader;
for (int j = 0; j < segmentReader.getFieldInfos().size(); j++) {
@ -307,7 +308,7 @@ final class SegmentMerger {
try {
for (int i = 0; i < readers.size(); i++) {
final IndexReader reader = (IndexReader) readers.elementAt(i);
final IndexReader reader = (IndexReader) readers.get(i);
final SegmentReader matchingSegmentReader = matchingSegmentReaders[i];
final FieldsReader matchingFieldsReader;
final boolean hasMatchingReader;
@ -385,7 +386,7 @@ final class SegmentMerger {
// are no deletions in any of these segments, so we
// just sum numDocs() of each segment to get total docCount
for (int i = 0; i < readers.size(); i++)
docCount += ((IndexReader) readers.elementAt(i)).numDocs();
docCount += ((IndexReader) readers.get(i)).numDocs();
return docCount;
}
@ -418,7 +419,7 @@ final class SegmentMerger {
hasMatchingReader = false;
matchingVectorsReader = null;
}
IndexReader reader = (IndexReader) readers.elementAt(r);
IndexReader reader = (IndexReader) readers.get(r);
final boolean hasDeletions = reader.hasDeletions();
int maxDoc = reader.maxDoc();
for (int docNum = 0; docNum < maxDoc;) {
@ -510,7 +511,7 @@ final class SegmentMerger {
int base = 0;
final int readerCount = readers.size();
for (int i = 0; i < readerCount; i++) {
IndexReader reader = (IndexReader) readers.elementAt(i);
IndexReader reader = (IndexReader) readers.get(i);
TermEnum termEnum = reader.terms();
SegmentMergeInfo smi = new SegmentMergeInfo(base, termEnum, reader);
int[] docMap = smi.getDocMap();
@ -750,7 +751,7 @@ final class SegmentMerger {
output.writeBytes(NORMS_HEADER,NORMS_HEADER.length);
}
for (int j = 0; j < readers.size(); j++) {
IndexReader reader = (IndexReader) readers.elementAt(j);
IndexReader reader = (IndexReader) readers.get(j);
int maxDoc = reader.maxDoc();
if (normBuffer == null || normBuffer.length < maxDoc) {
// the buffer is too small for the current segment

View File

@ -18,14 +18,15 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
@ -695,8 +696,8 @@ class SegmentReader extends DirectoryIndexReader {
undeleteAll = true;
}
Vector files() throws IOException {
return new Vector(si.files());
List files() throws IOException {
return new ArrayList(si.files());
}
public TermEnum terms() {

View File

@ -17,6 +17,11 @@ package org.apache.lucene.queryParser;
* limitations under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@ -24,9 +29,6 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import java.util.Vector;
import java.util.Map;
/**
* A QueryParser which constructs queries to search multiple fields.
*
@ -97,7 +99,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
if (field == null) {
Vector clauses = new Vector();
List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
Query q = super.getFieldQuery(fields[i], queryText);
if (q != null) {
@ -139,7 +141,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
{
if (field == null) {
Vector clauses = new Vector();
List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getFuzzyQuery(fields[i], termStr, minSimilarity),
BooleanClause.Occur.SHOULD));
@ -152,7 +154,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getPrefixQuery(String field, String termStr) throws ParseException
{
if (field == null) {
Vector clauses = new Vector();
List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getPrefixQuery(fields[i], termStr),
BooleanClause.Occur.SHOULD));
@ -164,7 +166,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
if (field == null) {
Vector clauses = new Vector();
List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getWildcardQuery(fields[i], termStr),
BooleanClause.Occur.SHOULD));
@ -177,7 +179,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
if (field == null) {
Vector clauses = new Vector();
List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getRangeQuery(fields[i], part1, part2, inclusive),
BooleanClause.Occur.SHOULD));

View File

@ -164,7 +164,7 @@ public class QueryParser implements QueryParserConstants {
public Query parse(String query) throws ParseException {
ReInit(new FastCharStream(new StringReader(query)));
try {
// TopLevelQuery is a Query followed by the end-of-input (EOF)
// TopLevelQuery is a Query followed by the end-of-input (EOF)
Query res = TopLevelQuery(field);
return res!=null ? res : newBooleanQuery(false);
}
@ -342,7 +342,6 @@ public class QueryParser implements QueryParserConstants {
return useOldRangeQuery;
}
/**
* Set locale used by date range parsing.
*/
@ -412,13 +411,20 @@ public class QueryParser implements QueryParserConstants {
return resolution;
}
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int mods, Query q) {
addClause((List) clauses, conj, mods, q);
}
protected void addClause(List clauses, int conj, int mods, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@ -428,7 +434,7 @@ public class QueryParser implements QueryParserConstants {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@ -453,11 +459,11 @@ public class QueryParser implements QueryParserConstants {
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST));
clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
@ -471,7 +477,7 @@ public class QueryParser implements QueryParserConstants {
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector();
List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@ -486,7 +492,7 @@ public class QueryParser implements QueryParserConstants {
}
if (nextToken == null)
break;
v.addElement(nextToken.clone());
list.add(nextToken.clone());
if (nextToken.getPositionIncrement() != 0)
positionCount += nextToken.getPositionIncrement();
else
@ -499,18 +505,18 @@ public class QueryParser implements QueryParserConstants {
// ignore
}
if (v.size() == 0)
if (list.size() == 0)
return null;
else if (v.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return newTermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = newBooleanQuery(true);
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
Query currentQuery = newTermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@ -523,8 +529,8 @@ public class QueryParser implements QueryParserConstants {
mpq.setSlop(phraseSlop);
List multiTerms = new ArrayList();
int position = -1;
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) {
mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@ -548,8 +554,8 @@ public class QueryParser implements QueryParserConstants {
PhraseQuery pq = newPhraseQuery();
pq.setSlop(phraseSlop);
int position = -1;
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (enablePositionIncrements) {
position += nextToken.getPositionIncrement();
pq.add(new Term(field, nextToken.term()),position);
@ -740,13 +746,31 @@ public class QueryParser implements QueryParserConstants {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
protected Query getBooleanQuery(List clauses) throws ParseException {
return getBooleanQuery(clauses, false);
}
@ -757,14 +781,35 @@ public class QueryParser implements QueryParserConstants {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException
{
if (clauses.size()==0) {
@ -772,7 +817,7 @@ public class QueryParser implements QueryParserConstants {
}
BooleanQuery query = newBooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i));
query.add((BooleanClause)clauses.get(i));
}
return query;
}
@ -846,7 +891,6 @@ public class QueryParser implements QueryParserConstants {
return newPrefixQuery(t);
}
/**
* Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses
@ -872,7 +916,7 @@ public class QueryParser implements QueryParserConstants {
* removed, or kept only once if there was a double escape.
*
* Supports escaped unicode characters, e. g. translates
* <code>A</code> to <code>A</code>.
* <code>\\u0041</code> to <code>A</code>.
*
*/
private String discardEscapeChar(String input) throws ParseException {
@ -1056,7 +1100,7 @@ public class QueryParser implements QueryParserConstants {
}
final public Query Query(String field) throws ParseException {
Vector clauses = new Vector();
List clauses = new ArrayList();
Query q, firstQuery=null;
int conj, mods;
mods = Modifiers();

View File

@ -143,7 +143,7 @@ public class QueryParser {
private Operator operator = OR_OPERATOR;
boolean lowercaseExpandedTerms = true;
boolean useOldRangeQuery= false;
boolean useOldRangeQuery= false;
boolean allowLeadingWildcard = false;
boolean enablePositionIncrements = false;
@ -188,7 +188,7 @@ public class QueryParser {
public Query parse(String query) throws ParseException {
ReInit(new FastCharStream(new StringReader(query)));
try {
// TopLevelQuery is a Query followed by the end-of-input (EOF)
// TopLevelQuery is a Query followed by the end-of-input (EOF)
Query res = TopLevelQuery(field);
return res!=null ? res : newBooleanQuery(false);
}
@ -203,28 +203,28 @@ public class QueryParser {
throw new ParseException("Cannot parse '" +query+ "': too many boolean clauses");
}
}
/**
* @return Returns the analyzer.
*/
public Analyzer getAnalyzer() {
return analyzer;
}
/**
* @return Returns the field.
*/
public String getField() {
return field;
}
/**
* Get the minimal similarity for fuzzy queries.
*/
public float getFuzzyMinSim() {
return fuzzyMinSim;
}
/**
* Set the minimum similarity for fuzzy queries.
* Default is 0.5f.
@ -232,7 +232,7 @@ public class QueryParser {
public void setFuzzyMinSim(float fuzzyMinSim) {
this.fuzzyMinSim = fuzzyMinSim;
}
/**
* Get the prefix length for fuzzy queries.
* @return Returns the fuzzyPrefixLength.
@ -240,7 +240,7 @@ public class QueryParser {
public int getFuzzyPrefixLength() {
return fuzzyPrefixLength;
}
/**
* Set the prefix length for fuzzy queries. Default is 0.
* @param fuzzyPrefixLength The fuzzyPrefixLength to set.
@ -344,7 +344,7 @@ public class QueryParser {
public boolean getLowercaseExpandedTerms() {
return lowercaseExpandedTerms;
}
/**
* By default QueryParser uses new ConstantScoreRangeQuery in preference to RangeQuery
* for range queries. This implementation is generally preferable because it
@ -365,7 +365,6 @@ public class QueryParser {
public boolean getUseOldRangeQuery() {
return useOldRangeQuery;
}
/**
* Set locale used by date range parsing.
@ -391,7 +390,7 @@ public class QueryParser {
public void setDateResolution(DateTools.Resolution dateResolution) {
this.dateResolution = dateResolution;
}
/**
* Sets the date resolution used by RangeQueries for a specific field.
*
@ -402,12 +401,12 @@ public class QueryParser {
if (fieldName == null) {
throw new IllegalArgumentException("Field cannot be null.");
}
if (fieldToDateResolution == null) {
// lazily initialize HashMap
fieldToDateResolution = new HashMap();
}
fieldToDateResolution.put(fieldName, dateResolution);
}
@ -421,28 +420,35 @@ public class QueryParser {
if (fieldName == null) {
throw new IllegalArgumentException("Field cannot be null.");
}
if (fieldToDateResolution == null) {
// no field specific date resolutions set; return default date resolution instead
return this.dateResolution;
}
DateTools.Resolution resolution = (DateTools.Resolution) fieldToDateResolution.get(fieldName);
if (resolution == null) {
// no date resolutions set for the given field; return default date resolution instead
resolution = this.dateResolution;
}
return resolution;
}
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int mods, Query q) {
addClause((List) clauses, conj, mods, q);
}
protected void addClause(List clauses, int conj, int mods, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@ -452,7 +458,7 @@ public class QueryParser {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@ -477,11 +483,11 @@ public class QueryParser {
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST));
clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
@ -495,7 +501,7 @@ public class QueryParser {
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector();
List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@ -510,7 +516,7 @@ public class QueryParser {
}
if (nextToken == null)
break;
v.addElement(nextToken.clone());
list.add(nextToken.clone());
if (nextToken.getPositionIncrement() != 0)
positionCount += nextToken.getPositionIncrement();
else
@ -523,18 +529,18 @@ public class QueryParser {
// ignore
}
if (v.size() == 0)
if (list.size() == 0)
return null;
else if (v.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return newTermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = newBooleanQuery(true);
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
Query currentQuery = newTermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@ -544,11 +550,11 @@ public class QueryParser {
else {
// phrase query:
MultiPhraseQuery mpq = newMultiPhraseQuery();
mpq.setSlop(phraseSlop);
mpq.setSlop(phraseSlop);
List multiTerms = new ArrayList();
int position = -1;
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) {
mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@ -572,8 +578,8 @@ public class QueryParser {
PhraseQuery pq = newPhraseQuery();
pq.setSlop(phraseSlop);
int position = -1;
for (int i = 0; i < v.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (enablePositionIncrements) {
position += nextToken.getPositionIncrement();
pq.add(new Term(field, nextToken.term()),position);
@ -594,8 +600,8 @@ public class QueryParser {
*
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
@ -764,13 +770,31 @@ public class QueryParser {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
protected Query getBooleanQuery(List clauses) throws ParseException {
return getBooleanQuery(clauses, false);
}
@ -781,14 +805,35 @@ public class QueryParser {
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses Vector that contains {@link BooleanClause} instances
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException
{
if (clauses.size()==0) {
@ -796,7 +841,7 @@ public class QueryParser {
}
BooleanQuery query = newBooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i));
query.add((BooleanClause)clauses.get(i));
}
return query;
}
@ -870,7 +915,6 @@ public class QueryParser {
return newPrefixQuery(t);
}
/**
* Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses
@ -896,29 +940,29 @@ public class QueryParser {
* removed, or kept only once if there was a double escape.
*
* Supports escaped unicode characters, e. g. translates
* <code>\u0041</code> to <code>A</code>.
* <code>\\u0041</code> to <code>A</code>.
*
*/
private String discardEscapeChar(String input) throws ParseException {
// Create char array to hold unescaped char sequence
char[] output = new char[input.length()];
// The length of the output can be less than the input
// due to discarded escape chars. This variable holds
// the actual length of the output
int length = 0;
// We remember whether the last processed character was
// an escape character
boolean lastCharWasEscapeChar = false;
// The multiplier the current unicode digit must be multiplied with.
// E. g. the first digit must be multiplied with 16^3, the second with 16^2...
int codePointMultiplier = 0;
// Used to calculate the codepoint of the escaped unicode character
int codePoint = 0;
for (int i = 0; i < input.length(); i++) {
char curChar = input.charAt(i);
if (codePointMultiplier > 0) {
@ -932,9 +976,9 @@ public class QueryParser {
if (curChar == 'u') {
// found an escaped unicode character
codePointMultiplier = 16 * 16 * 16;
} else {
} else {
// this character was escaped
output[length] = curChar;
output[length] = curChar;
length++;
}
lastCharWasEscapeChar = false;
@ -947,18 +991,18 @@ public class QueryParser {
}
}
}
if (codePointMultiplier > 0) {
throw new ParseException("Truncated unicode escape sequence.");
}
if (lastCharWasEscapeChar) {
throw new ParseException("Term can not end with escape character.");
}
return new String(output, 0, length);
}
/** Returns the numeric value of the hexadecimal character */
private static final int hexToInt(char c) throws ParseException {
if ('0' <= c && c <= '9') {
@ -971,7 +1015,7 @@ public class QueryParser {
throw new ParseException("None-hex character in unicode escape sequence: " + c);
}
}
/**
* Returns a String where those characters that QueryParser
* expects to be escaped are escaped by a preceding <code>\</code>.
@ -1108,7 +1152,7 @@ Query TopLevelQuery(String field) :
Query Query(String field) :
{
Vector clauses = new Vector();
List clauses = new ArrayList();
Query q, firstQuery=null;
int conj, mods;
}

View File

@ -19,7 +19,6 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Set;
import java.util.Vector;
import java.util.ArrayList;
import org.apache.lucene.index.Term;

View File

@ -24,7 +24,8 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Hashtable;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.IndexFileNameFilter;
@ -58,7 +59,7 @@ public class FSDirectory extends Directory {
* instance from the cache. See LUCENE-776
* for some relevant discussion.
*/
private static final Hashtable DIRECTORIES = new Hashtable();
private static final Map DIRECTORIES = new HashMap();
private static boolean disableLocks = false;

View File

@ -186,7 +186,7 @@ public class TestDoc extends LuceneTestCase {
merger.closeReaders();
if (useCompoundFile) {
Vector filesToDelete = merger.createCompoundFile(merged + ".cfs");
List filesToDelete = merger.createCompoundFile(merged + ".cfs");
for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
directory.deleteFile((String) iter.next());
}

View File

@ -19,8 +19,10 @@ package org.apache.lucene.store;
import java.io.File;
import java.io.IOException;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@ -60,8 +62,8 @@ public class TestLockFactory extends LuceneTestCase {
assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
lf.makeLockCount >= 1);
for(Enumeration e = lf.locksCreated.keys(); e.hasMoreElements();) {
String lockName = (String) e.nextElement();
for(Iterator e = lf.locksCreated.keySet().iterator(); e.hasNext();) {
String lockName = (String) e.next();
MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
lock.lockAttempts > 0);
@ -522,7 +524,7 @@ public class TestLockFactory extends LuceneTestCase {
public class MockLockFactory extends LockFactory {
public boolean lockPrefixSet;
public Hashtable locksCreated = new Hashtable();
public Map locksCreated = Collections.synchronizedMap(new HashMap());
public int makeLockCount = 0;
public void setLockPrefix(String lockPrefix) {