LUCENE-1369: switch from Hashtable to HashMap and from Vector to List, when possible

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@692921 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2008-09-07 19:22:40 +00:00
parent 6242cb3322
commit d5a40278bc
34 changed files with 489 additions and 287 deletions

View File

@@ -17,6 +17,13 @@ package org.apache.lucene.analysis.br;
* limitations under the License. * limitations under the License.
*/ */
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.StopFilter;
@@ -24,12 +31,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader; import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Hashtable;
import java.util.HashSet;
import java.util.Set;
/** /**
* Analyzer for Brazilian language. Supports an external list of stopwords (words that * Analyzer for Brazilian language. Supports an external list of stopwords (words that
@@ -92,7 +93,7 @@ public final class BrazilianAnalyzer extends Analyzer {
/** /**
* Builds an analyzer with the given stop words. * Builds an analyzer with the given stop words.
*/ */
public BrazilianAnalyzer( Hashtable stopwords ) { public BrazilianAnalyzer( Map stopwords ) {
stoptable = new HashSet(stopwords.keySet()); stoptable = new HashSet(stopwords.keySet());
} }
@@ -112,7 +113,7 @@ public final class BrazilianAnalyzer extends Analyzer {
/** /**
* Builds an exclusionlist from a Hashtable. * Builds an exclusionlist from a Hashtable.
*/ */
public void setStemExclusionTable( Hashtable exclusionlist ) { public void setStemExclusionTable( Map exclusionlist ) {
excltable = new HashSet(exclusionlist.keySet()); excltable = new HashSet(exclusionlist.keySet());
} }
/** /**

View File

@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
import java.io.IOException; import java.io.IOException;
import java.util.HashSet; import java.util.HashSet;
import java.util.Hashtable;
import java.util.Set; import java.util.Set;
/** /**

View File

@@ -17,7 +17,8 @@ package org.apache.lucene.analysis.cn;
* limitations under the License. * limitations under the License.
*/ */
import java.util.Hashtable; import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenFilter;
@@ -54,12 +55,12 @@ public final class ChineseFilter extends TokenFilter {
}; };
private Hashtable stopTable; private Map stopTable;
public ChineseFilter(TokenStream in) { public ChineseFilter(TokenStream in) {
super(in); super(in);
stopTable = new Hashtable(STOP_WORDS.length); stopTable = new HashMap(STOP_WORDS.length);
for (int i = 0; i < STOP_WORDS.length; i++) for (int i = 0; i < STOP_WORDS.length; i++)
stopTable.put(STOP_WORDS[i], STOP_WORDS[i]); stopTable.put(STOP_WORDS[i], STOP_WORDS[i]);
} }

View File

@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.*; import java.io.*;
import java.util.Hashtable;
import java.util.HashSet; import java.util.HashSet;
import java.util.Set; import java.util.Set;

View File

@@ -22,7 +22,7 @@ import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.Reader; import java.io.Reader;
import java.util.HashSet; import java.util.HashSet;
import java.util.Hashtable; import java.util.Map;
import java.util.Set; import java.util.Set;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
@@ -91,7 +91,7 @@ public class GermanAnalyzer extends Analyzer {
/** /**
* Builds an analyzer with the given stop words. * Builds an analyzer with the given stop words.
*/ */
public GermanAnalyzer(Hashtable stopwords) { public GermanAnalyzer(Map stopwords) {
stopSet = new HashSet(stopwords.keySet()); stopSet = new HashSet(stopwords.keySet());
} }
@@ -112,7 +112,7 @@ public class GermanAnalyzer extends Analyzer {
/** /**
* Builds an exclusionlist from a Hashtable. * Builds an exclusionlist from a Hashtable.
*/ */
public void setStemExclusionTable(Hashtable exclusionlist) { public void setStemExclusionTable(Map exclusionlist) {
exclusionSet = new HashSet(exclusionlist.keySet()); exclusionSet = new HashSet(exclusionlist.keySet());
} }

View File

@@ -16,6 +16,7 @@ package org.apache.lucene.analysis.el;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
@@ -23,7 +24,7 @@ import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.Reader; import java.io.Reader;
import java.util.HashSet; import java.util.HashSet;
import java.util.Hashtable; import java.util.Map;
import java.util.Set; import java.util.Set;
/** /**
@@ -200,7 +201,7 @@ public final class GreekAnalyzer extends Analyzer
/** /**
* Builds an analyzer with the given stop words. * Builds an analyzer with the given stop words.
*/ */
public GreekAnalyzer(char[] charset, Hashtable stopwords) public GreekAnalyzer(char[] charset, Map stopwords)
{ {
this.charset = charset; this.charset = charset;
stopSet = new HashSet(stopwords.keySet()); stopSet = new HashSet(stopwords.keySet());

View File

@@ -29,7 +29,7 @@ import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.Reader; import java.io.Reader;
import java.util.HashSet; import java.util.HashSet;
import java.util.Hashtable; import java.util.Map;
import java.util.Set; import java.util.Set;
/** /**
@@ -111,9 +111,9 @@ public final class FrenchAnalyzer extends Analyzer {
} }
/** /**
* Builds an exclusionlist from a Hashtable. * Builds an exclusionlist from a Map.
*/ */
public void setStemExclusionTable(Hashtable exclusionlist) { public void setStemExclusionTable(Map exclusionlist) {
excltable = new HashSet(exclusionlist.keySet()); excltable = new HashSet(exclusionlist.keySet());
} }

View File

@@ -20,9 +20,10 @@ package org.apache.lucene.analysis.fr;
import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import java.io.IOException; import java.io.IOException;
import java.util.Hashtable;
import java.util.HashSet; import java.util.HashSet;
import java.util.Map;
import java.util.Set; import java.util.Set;
/** /**
@@ -83,7 +84,7 @@ public final class FrenchStemFilter extends TokenFilter {
/** /**
* Set an alternative exclusion list for this filter. * Set an alternative exclusion list for this filter.
*/ */
public void setExclusionTable( Hashtable exclusiontable ) { public void setExclusionTable( Map exclusiontable ) {
exclusions = new HashSet(exclusiontable.keySet()); exclusions = new HashSet(exclusiontable.keySet());
} }
} }

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.analysis.ru;
import java.io.Reader; import java.io.Reader;
import java.util.HashSet; import java.util.HashSet;
import java.util.Hashtable; import java.util.Map;
import java.util.Set; import java.util.Set;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
@@ -237,7 +237,7 @@ public final class RussianAnalyzer extends Analyzer
* Builds an analyzer with the given stop words. * Builds an analyzer with the given stop words.
* @todo create a Set version of this ctor * @todo create a Set version of this ctor
*/ */
public RussianAnalyzer(char[] charset, Hashtable stopwords) public RussianAnalyzer(char[] charset, Map stopwords)
{ {
this.charset = charset; this.charset = charset;
stopSet = new HashSet(stopwords.keySet()); stopSet = new HashSet(stopwords.keySet());

View File

@@ -57,16 +57,17 @@ package lucli;
import java.io.IOException; import java.io.IOException;
import java.io.Reader; import java.io.Reader;
import java.io.StringReader; import java.io.StringReader;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.Vector;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Arrays; import java.util.Arrays;
import java.util.Comparator; import java.util.Comparator;
import java.util.Iterator;
import java.util.Enumeration; import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.Map.Entry;
import jline.ConsoleReader; import jline.ConsoleReader;
@@ -91,15 +92,15 @@ import org.apache.lucene.search.Searcher;
/** /**
* Various methods that interact with Lucene and provide info about the * Various methods that interact with Lucene and provide info about the
* index, search, etc. Parts addapted from Lucene demo. * index, search, etc. Parts adapted from Lucene demo.
*/ */
class LuceneMethods { class LuceneMethods {
private int numDocs; private int numDocs;
private String indexName; //directory of this index private String indexName; //directory of this index
private java.util.Iterator fieldIterator; private java.util.Iterator fieldIterator;
private Vector fields; //Fields as a vector private List fields; //Fields as a vector
private Vector indexedFields; //Fields as a vector private List indexedFields; //Fields as a vector
private String fieldsArray[]; //Fields as an array private String fieldsArray[]; //Fields as an array
private Searcher searcher; private Searcher searcher;
private Query query; //current query string private Query query; //current query string
@@ -247,8 +248,8 @@ class LuceneMethods {
private void getFieldInfo() throws IOException { private void getFieldInfo() throws IOException {
IndexReader indexReader = IndexReader.open(indexName); IndexReader indexReader = IndexReader.open(indexName);
fields = new Vector(); fields = new ArrayList();
indexedFields = new Vector(); indexedFields = new ArrayList();
//get the list of all field names //get the list of all field names
fieldIterator = indexReader.getFieldNames(FieldOption.ALL).iterator(); fieldIterator = indexReader.getFieldNames(FieldOption.ALL).iterator();
@@ -274,14 +275,14 @@ class LuceneMethods {
private void invertDocument(Document doc) private void invertDocument(Document doc)
throws IOException { throws IOException {
Hashtable tokenHash = new Hashtable(); Map tokenMap = new HashMap();
final int maxFieldLength = 10000; final int maxFieldLength = 10000;
Analyzer analyzer = new StandardAnalyzer(); Analyzer analyzer = new StandardAnalyzer();
Enumeration fields = doc.fields(); Iterator fields = doc.getFields().iterator();
final Token reusableToken = new Token(); final Token reusableToken = new Token();
while (fields.hasMoreElements()) { while (fields.hasNext()) {
Field field = (Field) fields.nextElement(); Field field = (Field) fields.next();
String fieldName = field.name(); String fieldName = field.name();
@@ -304,12 +305,12 @@ class LuceneMethods {
position += (nextToken.getPositionIncrement() - 1); position += (nextToken.getPositionIncrement() - 1);
position++; position++;
String name = nextToken.term(); String name = nextToken.term();
Integer Count = (Integer) tokenHash.get(name); Integer Count = (Integer) tokenMap.get(name);
if (Count == null) { // not in there yet if (Count == null) { // not in there yet
tokenHash.put(name, new Integer(1)); //first one tokenMap.put(name, new Integer(1)); //first one
} else { } else {
int count = Count.intValue(); int count = Count.intValue();
tokenHash.put(name, new Integer(count + 1)); tokenMap.put(name, new Integer(count + 1));
} }
if (position > maxFieldLength) break; if (position > maxFieldLength) break;
} }
@@ -320,7 +321,7 @@ class LuceneMethods {
} }
} }
Entry[] sortedHash = getSortedHashtableEntries(tokenHash); Entry[] sortedHash = getSortedMapEntries(tokenMap);
for (int ii = 0; ii < sortedHash.length && ii < 10; ii++) { for (int ii = 0; ii < sortedHash.length && ii < 10; ii++) {
Entry currentEntry = sortedHash[ii]; Entry currentEntry = sortedHash[ii];
message((ii + 1) + ":" + currentEntry.getKey() + " " + currentEntry.getValue()); message((ii + 1) + ":" + currentEntry.getKey() + " " + currentEntry.getValue());
@@ -353,14 +354,13 @@ class LuceneMethods {
indexReader.close(); indexReader.close();
} }
/** Sort Hashtable values /** Sort Map values
* @param h the hashtable we're sorting * @param m the map we're sorting
* from http://developer.java.sun.com/developer/qow/archive/170/index.jsp * from http://developer.java.sun.com/developer/qow/archive/170/index.jsp
*/ */
public static Entry[] public static Entry[]
getSortedHashtableEntries(Hashtable h) { getSortedMapEntries(Map m) {
Set set = h.entrySet(); Set set = m.entrySet();
Entry[] entries = Entry[] entries =
(Entry[]) set.toArray( (Entry[]) set.toArray(
new Entry[set.size()]); new Entry[set.size()]);

View File

@@ -28,8 +28,9 @@ import java.io.InputStreamReader;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.nio.charset.Charset; import java.nio.charset.Charset;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Enumeration; import java.util.Iterator;
import java.util.LinkedHashSet; import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set; import java.util.Set;
import junit.framework.TestCase; import junit.framework.TestCase;
@@ -350,7 +351,7 @@ public class MemoryIndexTest extends TestCase {
private String[] readLines(File file) throws Exception { private String[] readLines(File file) throws Exception {
BufferedReader reader = new BufferedReader(new InputStreamReader( BufferedReader reader = new BufferedReader(new InputStreamReader(
new FileInputStream(file))); new FileInputStream(file)));
ArrayList lines = new ArrayList(); List lines = new ArrayList();
String line; String line;
while ((line = reader.readLine()) != null) { while ((line = reader.readLine()) != null) {
String t = line.trim(); String t = line.trim();
@@ -373,9 +374,9 @@ public class MemoryIndexTest extends TestCase {
private MemoryIndex createMemoryIndex(Document doc) { private MemoryIndex createMemoryIndex(Document doc) {
MemoryIndex index = new MemoryIndex(); MemoryIndex index = new MemoryIndex();
Enumeration iter = doc.fields(); Iterator iter = doc.getFields().iterator();
while (iter.hasMoreElements()) { while (iter.hasNext()) {
Field field = (Field) iter.nextElement(); Field field = (Field) iter.next();
index.addField(field.name(), field.stringValue(), analyzer); index.addField(field.name(), field.stringValue(), analyzer);
} }
return index; return index;

View File

@@ -252,13 +252,20 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
return locale; return locale;
} }
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int modifier, Query q) { protected void addClause(Vector clauses, int conj, int modifier, Query q) {
addClause((List) clauses, conj, modifier, q);
}
protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited; boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required, // If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited // unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) { if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST); c.setOccur(BooleanClause.Occur.MUST);
} }
@@ -268,7 +275,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without // notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b // this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD); c.setOccur(BooleanClause.Occur.SHOULD);
} }
@@ -293,11 +300,11 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
required = (!prohibited && conj != CONJ_OR); required = (!prohibited && conj != CONJ_OR);
} }
if (required && !prohibited) if (required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST)); clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited) else if (!required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD)); clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited) else if (!required && prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT)); clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else else
throw new RuntimeException("Clause cannot be both required and prohibited"); throw new RuntimeException("Clause cannot be both required and prohibited");
} }
@@ -310,7 +317,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// PhraseQuery, or nothing based on the term count // PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText)); TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector(); List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token(); final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken; org.apache.lucene.analysis.Token nextToken;
int positionCount = 0; int positionCount = 0;
@@ -325,7 +332,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
} }
if (nextToken == null) if (nextToken == null)
break; break;
v.addElement(nextToken.clone()); list.add(nextToken.clone());
if (nextToken.getPositionIncrement() == 1) if (nextToken.getPositionIncrement() == 1)
positionCount++; positionCount++;
else else
@@ -338,18 +345,18 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// ignore // ignore
} }
if (v.size() == 0) if (list.size() == 0)
return null; return null;
else if (v.size() == 1) { else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0); nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term())); return new TermQuery(new Term(field, nextToken.term()));
} else { } else {
if (severalTokensAtSamePosition) { if (severalTokensAtSamePosition) {
if (positionCount == 1) { if (positionCount == 1) {
// no phrase query: // no phrase query:
BooleanQuery q = new BooleanQuery(); BooleanQuery q = new BooleanQuery();
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery( TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term())); new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD); q.add(currentQuery, BooleanClause.Occur.SHOULD);
@@ -360,8 +367,8 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
// phrase query: // phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery(); MultiPhraseQuery mpq = new MultiPhraseQuery();
List multiTerms = new ArrayList(); List multiTerms = new ArrayList();
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) { if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) {
mpq.add((Term[])multiTerms.toArray(new Term[0])); mpq.add((Term[])multiTerms.toArray(new Term[0]));
multiTerms.clear(); multiTerms.clear();
@@ -375,10 +382,9 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
else { else {
PhraseQuery q = new PhraseQuery(); PhraseQuery q = new PhraseQuery();
q.setSlop(phraseSlop); q.setSlop(phraseSlop);
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
q.add(new Term(field, ((org.apache.lucene.analysis.Token) q.add(new Term(field, ((org.apache.lucene.analysis.Token)
v.elementAt(i)).term())); list.get(i)).term()));
} }
return q; return q;
} }
@@ -440,13 +446,32 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
{
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses) throws ParseException protected Query getBooleanQuery(List clauses) throws ParseException
{ {
return getBooleanQuery(clauses, false); return getBooleanQuery(clauses, false);
} }
@@ -458,22 +483,42 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* @param disableCoord true if coord scoring should be disabled. * @param disableCoord true if coord scoring should be disabled.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses, boolean disableCoord) protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException throws ParseException {
{
if (clauses == null || clauses.size() == 0) if (clauses == null || clauses.size() == 0)
return null; return null;
BooleanQuery query = new BooleanQuery(disableCoord); BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) { for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i)); query.add((BooleanClause)clauses.get(i));
} }
return query; return query;
} }
@@ -675,7 +720,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
} }
final public Query Query(String field) throws ParseException { final public Query Query(String field) throws ParseException {
Vector clauses = new Vector(); List clauses = new ArrayList();
Query q, firstQuery=null; Query q, firstQuery=null;
boolean orPresent = false; boolean orPresent = false;
int modifier; int modifier;
@@ -727,7 +772,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
} }
final public Query andExpression(String field) throws ParseException { final public Query andExpression(String field) throws ParseException {
Vector clauses = new Vector(); List clauses = new ArrayList();
Query q, firstQuery=null; Query q, firstQuery=null;
int modifier; int modifier;
q = Clause(field); q = Clause(field);

View File

@@ -276,13 +276,20 @@ public class PrecedenceQueryParser {
return locale; return locale;
} }
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int modifier, Query q) { protected void addClause(Vector clauses, int conj, int modifier, Query q) {
addClause((List) clauses, conj, modifier, q);
}
protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited; boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required, // If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited // unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) { if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST); c.setOccur(BooleanClause.Occur.MUST);
} }
@@ -292,7 +299,7 @@ public class PrecedenceQueryParser {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without // notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b // this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD); c.setOccur(BooleanClause.Occur.SHOULD);
} }
@@ -317,11 +324,11 @@ public class PrecedenceQueryParser {
required = (!prohibited && conj != CONJ_OR); required = (!prohibited && conj != CONJ_OR);
} }
if (required && !prohibited) if (required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST)); clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited) else if (!required && !prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD)); clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited) else if (!required && prohibited)
clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT)); clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else else
throw new RuntimeException("Clause cannot be both required and prohibited"); throw new RuntimeException("Clause cannot be both required and prohibited");
} }
@@ -334,7 +341,7 @@ public class PrecedenceQueryParser {
// PhraseQuery, or nothing based on the term count // PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText)); TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector(); List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token(); final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken; org.apache.lucene.analysis.Token nextToken;
int positionCount = 0; int positionCount = 0;
@@ -349,7 +356,7 @@ public class PrecedenceQueryParser {
} }
if (nextToken == null) if (nextToken == null)
break; break;
v.addElement(nextToken.clone()); list.add(nextToken.clone());
if (nextToken.getPositionIncrement() == 1) if (nextToken.getPositionIncrement() == 1)
positionCount++; positionCount++;
else else
@@ -362,18 +369,18 @@ public class PrecedenceQueryParser {
// ignore // ignore
} }
if (v.size() == 0) if (list.size() == 0)
return null; return null;
else if (v.size() == 1) { else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0); nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term())); return new TermQuery(new Term(field, nextToken.term()));
} else { } else {
if (severalTokensAtSamePosition) { if (severalTokensAtSamePosition) {
if (positionCount == 1) { if (positionCount == 1) {
// no phrase query: // no phrase query:
BooleanQuery q = new BooleanQuery(); BooleanQuery q = new BooleanQuery();
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery( TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term())); new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD); q.add(currentQuery, BooleanClause.Occur.SHOULD);
@@ -384,8 +391,8 @@ public class PrecedenceQueryParser {
// phrase query: // phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery(); MultiPhraseQuery mpq = new MultiPhraseQuery();
List multiTerms = new ArrayList(); List multiTerms = new ArrayList();
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) { if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) {
mpq.add((Term[])multiTerms.toArray(new Term[0])); mpq.add((Term[])multiTerms.toArray(new Term[0]));
multiTerms.clear(); multiTerms.clear();
@ -399,10 +406,9 @@ public class PrecedenceQueryParser {
else { else {
PhraseQuery q = new PhraseQuery(); PhraseQuery q = new PhraseQuery();
q.setSlop(phraseSlop); q.setSlop(phraseSlop);
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
q.add(new Term(field, ((org.apache.lucene.analysis.Token) q.add(new Term(field, ((org.apache.lucene.analysis.Token)
v.elementAt(i)).term())); list.get(i)).term()));
} }
return q; return q;
} }
@ -464,13 +470,32 @@ public class PrecedenceQueryParser {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
{
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses) throws ParseException protected Query getBooleanQuery(List clauses) throws ParseException
{ {
return getBooleanQuery(clauses, false); return getBooleanQuery(clauses, false);
} }
@ -482,22 +507,42 @@ public class PrecedenceQueryParser {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* @param disableCoord true if coord scoring should be disabled. * @param disableCoord true if coord scoring should be disabled.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses, boolean disableCoord) protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException throws ParseException {
{
if (clauses == null || clauses.size() == 0) if (clauses == null || clauses.size() == 0)
return null; return null;
BooleanQuery query = new BooleanQuery(disableCoord); BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) { for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i)); query.add((BooleanClause)clauses.get(i));
} }
return query; return query;
} }
@ -733,7 +778,7 @@ int Modifier() : {
Query Query(String field) : Query Query(String field) :
{ {
Vector clauses = new Vector(); List clauses = new ArrayList();
Query q, firstQuery=null; Query q, firstQuery=null;
boolean orPresent = false; boolean orPresent = false;
int modifier; int modifier;
@ -760,7 +805,7 @@ Query Query(String field) :
Query andExpression(String field) : Query andExpression(String field) :
{ {
Vector clauses = new Vector(); List clauses = new ArrayList();
Query q, firstQuery=null; Query q, firstQuery=null;
int modifier; int modifier;
} }

View File

@ -48,7 +48,7 @@ public abstract class ComposedQuery extends SrndQuery {
public boolean isOperatorInfix() { return operatorInfix; } /* else prefix operator */ public boolean isOperatorInfix() { return operatorInfix; } /* else prefix operator */
public List makeLuceneSubQueriesField(String fn, BasicQueryFactory qf) { public List makeLuceneSubQueriesField(String fn, BasicQueryFactory qf) {
ArrayList luceneSubQueries = new ArrayList(); List luceneSubQueries = new ArrayList();
Iterator sqi = getSubQueriesIterator(); Iterator sqi = getSubQueriesIterator();
while (sqi.hasNext()) { while (sqi.hasNext()) {
luceneSubQueries.add( ((SrndQuery) sqi.next()).makeLuceneQueryField(fn, qf)); luceneSubQueries.add( ((SrndQuery) sqi.next()).makeLuceneQueryField(fn, qf));

View File

@ -24,11 +24,11 @@ import org.apache.lucene.search.Query;
public class FieldsQuery extends SrndQuery { /* mostly untested */ public class FieldsQuery extends SrndQuery { /* mostly untested */
private SrndQuery q; private SrndQuery q;
private ArrayList fieldNames; private List fieldNames;
private final char fieldOp; private final char fieldOp;
private final String OrOperatorName = "OR"; /* for expanded queries, not normally visible */ private final String OrOperatorName = "OR"; /* for expanded queries, not normally visible */
public FieldsQuery(SrndQuery q, ArrayList fieldNames, char fieldOp) { public FieldsQuery(SrndQuery q, List fieldNames, char fieldOp) {
this.q = q; this.q = q;
this.fieldNames = fieldNames; this.fieldNames = fieldNames;
this.fieldOp = fieldOp; this.fieldOp = fieldOp;
@ -49,7 +49,7 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */
if (fieldNames.size() == 1) { /* single field name: no new queries needed */ if (fieldNames.size() == 1) { /* single field name: no new queries needed */
return q.makeLuceneQueryFieldNoBoost((String) fieldNames.get(0), qf); return q.makeLuceneQueryFieldNoBoost((String) fieldNames.get(0), qf);
} else { /* OR query over the fields */ } else { /* OR query over the fields */
ArrayList queries = new ArrayList(); List queries = new ArrayList();
Iterator fni = getFieldNames().listIterator(); Iterator fni = getFieldNames().listIterator();
SrndQuery qc; SrndQuery qc;
while (fni.hasNext()) { while (fni.hasNext()) {

View File

@ -16,12 +16,14 @@ package org.apache.lucene.queryParser.surround.query;
* limitations under the License. * limitations under the License.
*/ */
import java.util.ArrayList;
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.index.Term; import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query; import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;
public abstract class SimpleTerm public abstract class SimpleTerm
extends SrndQuery extends SrndQuery
@ -78,7 +80,7 @@ public abstract class SimpleTerm
} }
public Query rewrite(IndexReader reader) throws IOException { public Query rewrite(IndexReader reader) throws IOException {
final ArrayList luceneSubQueries = new ArrayList(); final List luceneSubQueries = new ArrayList();
visitMatchingTerms( reader, fieldName, visitMatchingTerms( reader, fieldName,
new MatchingTermVisitor() { new MatchingTermVisitor() {
public void visitMatchingTerm(Term term) throws IOException { public void visitMatchingTerm(Term term) throws IOException {

View File

@ -16,15 +16,17 @@ package org.apache.lucene.swing.models;
* limitations under the License. * limitations under the License.
*/ */
import javax.swing.*;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.List;
import javax.swing.AbstractListModel;
/** /**
* @author Jonathan Simon - jonathan_s_simon@yahoo.com * @author Jonathan Simon - jonathan_s_simon@yahoo.com
*/ */
public class BaseListModel extends AbstractListModel { public class BaseListModel extends AbstractListModel {
private ArrayList data = new ArrayList(); private List data = new ArrayList();
public BaseListModel(Iterator iterator) { public BaseListModel(Iterator iterator) {
while (iterator.hasNext()) { while (iterator.hasNext()) {

View File

@ -16,16 +16,18 @@ package org.apache.lucene.swing.models;
* limitations under the License. * limitations under the License.
*/ */
import javax.swing.table.AbstractTableModel;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.List;
import javax.swing.table.AbstractTableModel;
/** /**
* @author Jonathan Simon - jonathan_s_simon@yahoo.com * @author Jonathan Simon - jonathan_s_simon@yahoo.com
*/ */
public class BaseTableModel extends AbstractTableModel { public class BaseTableModel extends AbstractTableModel {
private ArrayList columnNames = new ArrayList(); private List columnNames = new ArrayList();
private ArrayList rows = new ArrayList(); private List rows = new ArrayList();
public BaseTableModel(Iterator data) { public BaseTableModel(Iterator data) {
columnNames.add("Name"); columnNames.add("Name");

View File

@ -17,6 +17,7 @@ package org.apache.lucene.swing.models;
*/ */
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List;
import javax.swing.ListModel; import javax.swing.ListModel;
@ -28,7 +29,7 @@ import junit.framework.TestCase;
public class TestBasicList extends TestCase { public class TestBasicList extends TestCase {
private ListModel baseListModel; private ListModel baseListModel;
private ListSearcher listSearcher; private ListSearcher listSearcher;
private ArrayList list; private List list;
protected void setUp() throws Exception { protected void setUp() throws Exception {
list = new ArrayList(); list = new ArrayList();

View File

@ -17,6 +17,7 @@ package org.apache.lucene.swing.models;
*/ */
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List;
import javax.swing.table.TableModel; import javax.swing.table.TableModel;
@ -28,7 +29,7 @@ import junit.framework.TestCase;
public class TestBasicTable extends TestCase { public class TestBasicTable extends TestCase {
private TableModel baseTableModel; private TableModel baseTableModel;
private TableSearcher tableSearcher; private TableSearcher tableSearcher;
private ArrayList list; private List list;
protected void setUp() throws Exception { protected void setUp() throws Exception {
list = new ArrayList(); list = new ArrayList();

View File

@ -17,10 +17,11 @@ package org.apache.lucene.demo.html;
* limitations under the License. * limitations under the License.
*/ */
import java.util.*; import java.util.HashMap;
import java.util.Map;
public class Entities { public class Entities {
static final Hashtable decoder = new Hashtable(300); static final Map decoder = new HashMap(300);
static final String[] encoder = new String[0x100]; static final String[] encoder = new String[0x100];
static final String decode(String entity) { static final String decode(String entity) {

View File

@ -2952,7 +2952,7 @@ public class IndexWriter {
final SegmentInfo info = sis.info(j); final SegmentInfo info = sis.info(j);
docCount += info.docCount; docCount += info.docCount;
assert !segmentInfos.contains(info); assert !segmentInfos.contains(info);
segmentInfos.addElement(info); // add each info segmentInfos.add(info); // add each info
} }
} }
} }
@ -3077,7 +3077,7 @@ public class IndexWriter {
SegmentInfo info = sis.info(j); SegmentInfo info = sis.info(j);
assert !segmentInfos.contains(info): "dup info dir=" + info.dir + " name=" + info.name; assert !segmentInfos.contains(info): "dup info dir=" + info.dir + " name=" + info.name;
docCount += info.docCount; docCount += info.docCount;
segmentInfos.addElement(info); // add each info segmentInfos.add(info); // add each info
} }
} }
} }
@ -3287,10 +3287,10 @@ public class IndexWriter {
} }
synchronized(this) { synchronized(this) {
segmentInfos.setSize(0); // pop old infos & add new segmentInfos.clear(); // pop old infos & add new
info = new SegmentInfo(mergedName, docCount, directory, false, true, info = new SegmentInfo(mergedName, docCount, directory, false, true,
-1, null, false, merger.hasProx()); -1, null, false, merger.hasProx());
segmentInfos.addElement(info); segmentInfos.add(info);
} }
// Notify DocumentsWriter that the flushed count just increased // Notify DocumentsWriter that the flushed count just increased
@ -3650,7 +3650,7 @@ public class IndexWriter {
docWriter.pushDeletes(); docWriter.pushDeletes();
if (flushDocs) if (flushDocs)
segmentInfos.addElement(newSegment); segmentInfos.add(newSegment);
if (flushDeletes) { if (flushDeletes) {
flushDeletesCount++; flushDeletesCount++;

View File

@ -17,13 +17,14 @@ package org.apache.lucene.index;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Collection;
import java.util.Hashtable; import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs; import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum; import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions; import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
@ -36,7 +37,7 @@ public class MultiReader extends IndexReader {
protected IndexReader[] subReaders; protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close private boolean[] decrefOnClose; // remember which subreaders to decRef on close
private Hashtable normsCache = new Hashtable(); private Map normsCache = new HashMap();
private int maxDoc = 0; private int maxDoc = 0;
private int numDocs = -1; private int numDocs = -1;
private boolean hasDeletions = false; private boolean hasDeletions = false;
@ -288,7 +289,9 @@ public class MultiReader extends IndexReader {
protected void doSetNorm(int n, String field, byte value) protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException { throws CorruptIndexException, IOException {
synchronized (normsCache) {
normsCache.remove(field); // clear cache normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch subReaders[i].setNorm(n-starts[i], field, value); // dispatch
} }

View File

@ -17,26 +17,26 @@ package org.apache.lucene.index;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.store.Directory;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Collection;
import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator; import java.util.Iterator;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.store.Directory;
/** /**
* An IndexReader which reads indexes with multiple segments. * An IndexReader which reads indexes with multiple segments.
*/ */
class MultiSegmentReader extends DirectoryIndexReader { class MultiSegmentReader extends DirectoryIndexReader {
protected SegmentReader[] subReaders; protected SegmentReader[] subReaders;
private int[] starts; // 1st docno for each segment private int[] starts; // 1st docno for each segment
private Hashtable normsCache = new Hashtable(); private Map normsCache = new HashMap();
private int maxDoc = 0; private int maxDoc = 0;
private int numDocs = -1; private int numDocs = -1;
private boolean hasDeletions = false; private boolean hasDeletions = false;
@ -149,14 +149,15 @@ class MultiSegmentReader extends DirectoryIndexReader {
// try to copy unchanged norms from the old normsCache to the new one // try to copy unchanged norms from the old normsCache to the new one
if (oldNormsCache != null) { if (oldNormsCache != null) {
Iterator it = oldNormsCache.keySet().iterator(); Iterator it = oldNormsCache.entrySet().iterator();
while (it.hasNext()) { while (it.hasNext()) {
String field = (String) it.next(); Map.Entry entry = (Map.Entry) it.next();
String field = (String) entry.getKey();
if (!hasNorms(field)) { if (!hasNorms(field)) {
continue; continue;
} }
byte[] oldBytes = (byte[]) oldNormsCache.get(field); byte[] oldBytes = (byte[]) entry.getValue();
byte[] bytes = new byte[maxDoc()]; byte[] bytes = new byte[maxDoc()];
@ -353,7 +354,9 @@ class MultiSegmentReader extends DirectoryIndexReader {
protected void doSetNorm(int n, String field, byte value) protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException { throws CorruptIndexException, IOException {
synchronized (normsCache) {
normsCache.remove(field); // clear cache normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch subReaders[i].setNorm(n-starts[i], field, value); // dispatch
} }

View File

@ -91,7 +91,7 @@ final class SegmentInfos extends Vector {
private static PrintStream infoStream; private static PrintStream infoStream;
public final SegmentInfo info(int i) { public final SegmentInfo info(int i) {
return (SegmentInfo) elementAt(i); return (SegmentInfo) get(i);
} }
/** /**
@ -231,7 +231,7 @@ final class SegmentInfos extends Vector {
} }
for (int i = input.readInt(); i > 0; i--) { // read segmentInfos for (int i = input.readInt(); i > 0; i--) { // read segmentInfos
addElement(new SegmentInfo(directory, format, input)); add(new SegmentInfo(directory, format, input));
} }
if(format >= 0){ // in old format the version number may be at the end of the file if(format >= 0){ // in old format the version number may be at the end of the file
@ -337,7 +337,7 @@ final class SegmentInfos extends Vector {
public Object clone() { public Object clone() {
SegmentInfos sis = (SegmentInfos) super.clone(); SegmentInfos sis = (SegmentInfos) super.clone();
for(int i=0;i<sis.size();i++) { for(int i=0;i<sis.size();i++) {
sis.setElementAt(sis.info(i).clone(), i); sis.set(i, sis.info(i).clone());
} }
return sis; return sis;
} }

View File

@ -17,17 +17,18 @@ package org.apache.lucene.index;
* limitations under the License. * limitations under the License.
*/ */
import java.util.Vector;
import java.util.Iterator;
import java.util.Collection;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult; import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/** /**
* The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add}, * The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
@ -49,7 +50,7 @@ final class SegmentMerger {
private String segment; private String segment;
private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL; private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
private Vector readers = new Vector(); private List readers = new ArrayList();
private FieldInfos fieldInfos; private FieldInfos fieldInfos;
private int mergedDocs; private int mergedDocs;
@ -93,7 +94,7 @@ final class SegmentMerger {
* @param reader * @param reader
*/ */
final void add(IndexReader reader) { final void add(IndexReader reader) {
readers.addElement(reader); readers.add(reader);
} }
/** /**
@ -102,7 +103,7 @@ final class SegmentMerger {
* @return The ith reader to be merged * @return The ith reader to be merged
*/ */
final IndexReader segmentReader(int i) { final IndexReader segmentReader(int i) {
return (IndexReader) readers.elementAt(i); return (IndexReader) readers.get(i);
} }
/** /**
@ -152,18 +153,18 @@ final class SegmentMerger {
*/ */
final void closeReaders() throws IOException { final void closeReaders() throws IOException {
for (int i = 0; i < readers.size(); i++) { // close readers for (int i = 0; i < readers.size(); i++) { // close readers
IndexReader reader = (IndexReader) readers.elementAt(i); IndexReader reader = (IndexReader) readers.get(i);
reader.close(); reader.close();
} }
} }
final Vector createCompoundFile(String fileName) final List createCompoundFile(String fileName)
throws IOException { throws IOException {
CompoundFileWriter cfsWriter = CompoundFileWriter cfsWriter =
new CompoundFileWriter(directory, fileName, checkAbort); new CompoundFileWriter(directory, fileName, checkAbort);
Vector files = List files =
new Vector(IndexFileNames.COMPOUND_EXTENSIONS.length + 1); new ArrayList(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
// Basic files // Basic files
for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.length; i++) { for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.length; i++) {
@ -229,7 +230,7 @@ final class SegmentMerger {
// FieldInfos, then we can do a bulk copy of the // FieldInfos, then we can do a bulk copy of the
// stored fields: // stored fields:
for (int i = 0; i < readers.size(); i++) { for (int i = 0; i < readers.size(); i++) {
IndexReader reader = (IndexReader) readers.elementAt(i); IndexReader reader = (IndexReader) readers.get(i);
if (reader instanceof SegmentReader) { if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader; SegmentReader segmentReader = (SegmentReader) reader;
boolean same = true; boolean same = true;
@ -261,14 +262,14 @@ final class SegmentMerger {
// name -> number mapping are the same. So, we start // name -> number mapping are the same. So, we start
// with the fieldInfos of the last segment in this // with the fieldInfos of the last segment in this
// case, to keep that numbering. // case, to keep that numbering.
final SegmentReader sr = (SegmentReader) readers.elementAt(readers.size()-1); final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1);
fieldInfos = (FieldInfos) sr.fieldInfos.clone(); fieldInfos = (FieldInfos) sr.fieldInfos.clone();
} else { } else {
fieldInfos = new FieldInfos(); // merge field names fieldInfos = new FieldInfos(); // merge field names
} }
for (int i = 0; i < readers.size(); i++) { for (int i = 0; i < readers.size(); i++) {
IndexReader reader = (IndexReader) readers.elementAt(i); IndexReader reader = (IndexReader) readers.get(i);
if (reader instanceof SegmentReader) { if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader; SegmentReader segmentReader = (SegmentReader) reader;
for (int j = 0; j < segmentReader.getFieldInfos().size(); j++) { for (int j = 0; j < segmentReader.getFieldInfos().size(); j++) {
@ -307,7 +308,7 @@ final class SegmentMerger {
try { try {
for (int i = 0; i < readers.size(); i++) { for (int i = 0; i < readers.size(); i++) {
final IndexReader reader = (IndexReader) readers.elementAt(i); final IndexReader reader = (IndexReader) readers.get(i);
final SegmentReader matchingSegmentReader = matchingSegmentReaders[i]; final SegmentReader matchingSegmentReader = matchingSegmentReaders[i];
final FieldsReader matchingFieldsReader; final FieldsReader matchingFieldsReader;
final boolean hasMatchingReader; final boolean hasMatchingReader;
@ -385,7 +386,7 @@ final class SegmentMerger {
// are no deletions in any of these segments, so we // are no deletions in any of these segments, so we
// just sum numDocs() of each segment to get total docCount // just sum numDocs() of each segment to get total docCount
for (int i = 0; i < readers.size(); i++) for (int i = 0; i < readers.size(); i++)
docCount += ((IndexReader) readers.elementAt(i)).numDocs(); docCount += ((IndexReader) readers.get(i)).numDocs();
return docCount; return docCount;
} }
@ -418,7 +419,7 @@ final class SegmentMerger {
hasMatchingReader = false; hasMatchingReader = false;
matchingVectorsReader = null; matchingVectorsReader = null;
} }
IndexReader reader = (IndexReader) readers.elementAt(r); IndexReader reader = (IndexReader) readers.get(r);
final boolean hasDeletions = reader.hasDeletions(); final boolean hasDeletions = reader.hasDeletions();
int maxDoc = reader.maxDoc(); int maxDoc = reader.maxDoc();
for (int docNum = 0; docNum < maxDoc;) { for (int docNum = 0; docNum < maxDoc;) {
@ -510,7 +511,7 @@ final class SegmentMerger {
int base = 0; int base = 0;
final int readerCount = readers.size(); final int readerCount = readers.size();
for (int i = 0; i < readerCount; i++) { for (int i = 0; i < readerCount; i++) {
IndexReader reader = (IndexReader) readers.elementAt(i); IndexReader reader = (IndexReader) readers.get(i);
TermEnum termEnum = reader.terms(); TermEnum termEnum = reader.terms();
SegmentMergeInfo smi = new SegmentMergeInfo(base, termEnum, reader); SegmentMergeInfo smi = new SegmentMergeInfo(base, termEnum, reader);
int[] docMap = smi.getDocMap(); int[] docMap = smi.getDocMap();
@ -750,7 +751,7 @@ final class SegmentMerger {
output.writeBytes(NORMS_HEADER,NORMS_HEADER.length); output.writeBytes(NORMS_HEADER,NORMS_HEADER.length);
} }
for (int j = 0; j < readers.size(); j++) { for (int j = 0; j < readers.size(); j++) {
IndexReader reader = (IndexReader) readers.elementAt(j); IndexReader reader = (IndexReader) readers.get(j);
int maxDoc = reader.maxDoc(); int maxDoc = reader.maxDoc();
if (normBuffer == null || normBuffer.length < maxDoc) { if (normBuffer == null || normBuffer.length < maxDoc) {
// the buffer is too small for the current segment // the buffer is too small for the current segment

View File

@ -18,14 +18,15 @@ package org.apache.lucene.index;
*/ */
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.Vector;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelector;
@ -695,8 +696,8 @@ class SegmentReader extends DirectoryIndexReader {
undeleteAll = true; undeleteAll = true;
} }
Vector files() throws IOException { List files() throws IOException {
return new Vector(si.files()); return new ArrayList(si.files());
} }
public TermEnum terms() { public TermEnum terms() {

View File

@ -17,6 +17,11 @@ package org.apache.lucene.queryParser;
* limitations under the License. * limitations under the License.
*/ */
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanQuery;
@ -24,9 +29,6 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import java.util.Vector;
import java.util.Map;
/** /**
* A QueryParser which constructs queries to search multiple fields. * A QueryParser which constructs queries to search multiple fields.
* *
@ -97,7 +99,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
if (field == null) { if (field == null) {
Vector clauses = new Vector(); List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
Query q = super.getFieldQuery(fields[i], queryText); Query q = super.getFieldQuery(fields[i], queryText);
if (q != null) { if (q != null) {
@ -139,7 +141,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
{ {
if (field == null) { if (field == null) {
Vector clauses = new Vector(); List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getFuzzyQuery(fields[i], termStr, minSimilarity), clauses.add(new BooleanClause(getFuzzyQuery(fields[i], termStr, minSimilarity),
BooleanClause.Occur.SHOULD)); BooleanClause.Occur.SHOULD));
@ -152,7 +154,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getPrefixQuery(String field, String termStr) throws ParseException protected Query getPrefixQuery(String field, String termStr) throws ParseException
{ {
if (field == null) { if (field == null) {
Vector clauses = new Vector(); List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getPrefixQuery(fields[i], termStr), clauses.add(new BooleanClause(getPrefixQuery(fields[i], termStr),
BooleanClause.Occur.SHOULD)); BooleanClause.Occur.SHOULD));
@ -164,7 +166,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getWildcardQuery(String field, String termStr) throws ParseException { protected Query getWildcardQuery(String field, String termStr) throws ParseException {
if (field == null) { if (field == null) {
Vector clauses = new Vector(); List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getWildcardQuery(fields[i], termStr), clauses.add(new BooleanClause(getWildcardQuery(fields[i], termStr),
BooleanClause.Occur.SHOULD)); BooleanClause.Occur.SHOULD));
@ -177,7 +179,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException { protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
if (field == null) { if (field == null) {
Vector clauses = new Vector(); List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getRangeQuery(fields[i], part1, part2, inclusive), clauses.add(new BooleanClause(getRangeQuery(fields[i], part1, part2, inclusive),
BooleanClause.Occur.SHOULD)); BooleanClause.Occur.SHOULD));

View File

@ -342,7 +342,6 @@ public class QueryParser implements QueryParserConstants {
return useOldRangeQuery; return useOldRangeQuery;
} }
/** /**
* Set locale used by date range parsing. * Set locale used by date range parsing.
*/ */
@ -412,13 +411,20 @@ public class QueryParser implements QueryParserConstants {
return resolution; return resolution;
} }
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int mods, Query q) { protected void addClause(Vector clauses, int conj, int mods, Query q) {
addClause((List) clauses, conj, mods, q);
}
protected void addClause(List clauses, int conj, int mods, Query q) {
boolean required, prohibited; boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required, // If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited // unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) { if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST); c.setOccur(BooleanClause.Occur.MUST);
} }
@ -428,7 +434,7 @@ public class QueryParser implements QueryParserConstants {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without // notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b // this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD); c.setOccur(BooleanClause.Occur.SHOULD);
} }
@ -453,11 +459,11 @@ public class QueryParser implements QueryParserConstants {
required = (!prohibited && conj != CONJ_OR); required = (!prohibited && conj != CONJ_OR);
} }
if (required && !prohibited) if (required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST)); clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited) else if (!required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.SHOULD)); clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited) else if (!required && prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST_NOT)); clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
else else
throw new RuntimeException("Clause cannot be both required and prohibited"); throw new RuntimeException("Clause cannot be both required and prohibited");
} }
@ -471,7 +477,7 @@ public class QueryParser implements QueryParserConstants {
// PhraseQuery, or nothing based on the term count // PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText)); TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector(); List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token(); final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken; org.apache.lucene.analysis.Token nextToken;
int positionCount = 0; int positionCount = 0;
@ -486,7 +492,7 @@ public class QueryParser implements QueryParserConstants {
} }
if (nextToken == null) if (nextToken == null)
break; break;
v.addElement(nextToken.clone()); list.add(nextToken.clone());
if (nextToken.getPositionIncrement() != 0) if (nextToken.getPositionIncrement() != 0)
positionCount += nextToken.getPositionIncrement(); positionCount += nextToken.getPositionIncrement();
else else
@ -499,18 +505,18 @@ public class QueryParser implements QueryParserConstants {
// ignore // ignore
} }
if (v.size() == 0) if (list.size() == 0)
return null; return null;
else if (v.size() == 1) { else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0); nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return newTermQuery(new Term(field, nextToken.term())); return newTermQuery(new Term(field, nextToken.term()));
} else { } else {
if (severalTokensAtSamePosition) { if (severalTokensAtSamePosition) {
if (positionCount == 1) { if (positionCount == 1) {
// no phrase query: // no phrase query:
BooleanQuery q = newBooleanQuery(true); BooleanQuery q = newBooleanQuery(true);
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
Query currentQuery = newTermQuery( Query currentQuery = newTermQuery(
new Term(field, nextToken.term())); new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD); q.add(currentQuery, BooleanClause.Occur.SHOULD);
@ -523,8 +529,8 @@ public class QueryParser implements QueryParserConstants {
mpq.setSlop(phraseSlop); mpq.setSlop(phraseSlop);
List multiTerms = new ArrayList(); List multiTerms = new ArrayList();
int position = -1; int position = -1;
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) { if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) { if (enablePositionIncrements) {
mpq.add((Term[])multiTerms.toArray(new Term[0]),position); mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@ -548,8 +554,8 @@ public class QueryParser implements QueryParserConstants {
PhraseQuery pq = newPhraseQuery(); PhraseQuery pq = newPhraseQuery();
pq.setSlop(phraseSlop); pq.setSlop(phraseSlop);
int position = -1; int position = -1;
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (enablePositionIncrements) { if (enablePositionIncrements) {
position += nextToken.getPositionIncrement(); position += nextToken.getPositionIncrement();
pq.add(new Term(field, nextToken.term()),position); pq.add(new Term(field, nextToken.term()),position);
@ -740,13 +746,31 @@ public class QueryParser implements QueryParserConstants {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses) throws ParseException { protected Query getBooleanQuery(List clauses) throws ParseException {
return getBooleanQuery(clauses, false); return getBooleanQuery(clauses, false);
} }
@ -757,14 +781,35 @@ public class QueryParser implements QueryParserConstants {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* @param disableCoord true if coord scoring should be disabled. * @param disableCoord true if coord scoring should be disabled.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses, boolean disableCoord) protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException throws ParseException
{ {
if (clauses.size()==0) { if (clauses.size()==0) {
@ -772,7 +817,7 @@ public class QueryParser implements QueryParserConstants {
} }
BooleanQuery query = newBooleanQuery(disableCoord); BooleanQuery query = newBooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) { for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i)); query.add((BooleanClause)clauses.get(i));
} }
return query; return query;
} }
@ -846,7 +891,6 @@ public class QueryParser implements QueryParserConstants {
return newPrefixQuery(t); return newPrefixQuery(t);
} }
/** /**
* Factory method for generating a query (similar to * Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses * {@link #getWildcardQuery}). Called when parser parses
@ -872,7 +916,7 @@ public class QueryParser implements QueryParserConstants {
* removed, or kept only once if there was a double escape. * removed, or kept only once if there was a double escape.
* *
* Supports escaped unicode characters, e. g. translates * Supports escaped unicode characters, e. g. translates
* <code>A</code> to <code>A</code>. * <code>\\u0041</code> to <code>A</code>.
* *
*/ */
private String discardEscapeChar(String input) throws ParseException { private String discardEscapeChar(String input) throws ParseException {
@ -1056,7 +1100,7 @@ public class QueryParser implements QueryParserConstants {
} }
final public Query Query(String field) throws ParseException { final public Query Query(String field) throws ParseException {
Vector clauses = new Vector(); List clauses = new ArrayList();
Query q, firstQuery=null; Query q, firstQuery=null;
int conj, mods; int conj, mods;
mods = Modifiers(); mods = Modifiers();

View File

@ -366,7 +366,6 @@ public class QueryParser {
return useOldRangeQuery; return useOldRangeQuery;
} }
/** /**
* Set locale used by date range parsing. * Set locale used by date range parsing.
*/ */
@ -436,13 +435,20 @@ public class QueryParser {
return resolution; return resolution;
} }
/**
* @deprecated use {@link #addClause(List, int, int, Query)} instead.
*/
protected void addClause(Vector clauses, int conj, int mods, Query q) { protected void addClause(Vector clauses, int conj, int mods, Query q) {
addClause((List) clauses, conj, mods, q);
}
protected void addClause(List clauses, int conj, int mods, Query q) {
boolean required, prohibited; boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required, // If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited // unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) { if (clauses.size() > 0 && conj == CONJ_AND) {
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST); c.setOccur(BooleanClause.Occur.MUST);
} }
@ -452,7 +458,7 @@ public class QueryParser {
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without // notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b // this modification a OR b would parsed as +a OR b
BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1); BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited()) if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD); c.setOccur(BooleanClause.Occur.SHOULD);
} }
@ -477,11 +483,11 @@ public class QueryParser {
required = (!prohibited && conj != CONJ_OR); required = (!prohibited && conj != CONJ_OR);
} }
if (required && !prohibited) if (required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST)); clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited) else if (!required && !prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.SHOULD)); clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited) else if (!required && prohibited)
clauses.addElement(newBooleanClause(q, BooleanClause.Occur.MUST_NOT)); clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
else else
throw new RuntimeException("Clause cannot be both required and prohibited"); throw new RuntimeException("Clause cannot be both required and prohibited");
} }
@ -495,7 +501,7 @@ public class QueryParser {
// PhraseQuery, or nothing based on the term count // PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText)); TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
Vector v = new Vector(); List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token(); final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken; org.apache.lucene.analysis.Token nextToken;
int positionCount = 0; int positionCount = 0;
@ -510,7 +516,7 @@ public class QueryParser {
} }
if (nextToken == null) if (nextToken == null)
break; break;
v.addElement(nextToken.clone()); list.add(nextToken.clone());
if (nextToken.getPositionIncrement() != 0) if (nextToken.getPositionIncrement() != 0)
positionCount += nextToken.getPositionIncrement(); positionCount += nextToken.getPositionIncrement();
else else
@ -523,18 +529,18 @@ public class QueryParser {
// ignore // ignore
} }
if (v.size() == 0) if (list.size() == 0)
return null; return null;
else if (v.size() == 1) { else if (list.size() == 1) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0); nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return newTermQuery(new Term(field, nextToken.term())); return newTermQuery(new Term(field, nextToken.term()));
} else { } else {
if (severalTokensAtSamePosition) { if (severalTokensAtSamePosition) {
if (positionCount == 1) { if (positionCount == 1) {
// no phrase query: // no phrase query:
BooleanQuery q = newBooleanQuery(true); BooleanQuery q = newBooleanQuery(true);
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
Query currentQuery = newTermQuery( Query currentQuery = newTermQuery(
new Term(field, nextToken.term())); new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD); q.add(currentQuery, BooleanClause.Occur.SHOULD);
@ -547,8 +553,8 @@ public class QueryParser {
mpq.setSlop(phraseSlop); mpq.setSlop(phraseSlop);
List multiTerms = new ArrayList(); List multiTerms = new ArrayList();
int position = -1; int position = -1;
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) { if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) { if (enablePositionIncrements) {
mpq.add((Term[])multiTerms.toArray(new Term[0]),position); mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@ -572,8 +578,8 @@ public class QueryParser {
PhraseQuery pq = newPhraseQuery(); PhraseQuery pq = newPhraseQuery();
pq.setSlop(phraseSlop); pq.setSlop(phraseSlop);
int position = -1; int position = -1;
for (int i = 0; i < v.size(); i++) { for (int i = 0; i < list.size(); i++) {
nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i); nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (enablePositionIncrements) { if (enablePositionIncrements) {
position += nextToken.getPositionIncrement(); position += nextToken.getPositionIncrement();
pq.add(new Term(field, nextToken.term()),position); pq.add(new Term(field, nextToken.term()),position);
@ -764,13 +770,31 @@ public class QueryParser {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
return getBooleanQuery((List) clauses, false);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses) throws ParseException { protected Query getBooleanQuery(List clauses) throws ParseException {
return getBooleanQuery(clauses, false); return getBooleanQuery(clauses, false);
} }
@ -781,14 +805,35 @@ public class QueryParser {
* Can be overridden by extending classes, to modify query being * Can be overridden by extending classes, to modify query being
* returned. * returned.
* *
* @param clauses Vector that contains {@link BooleanClause} instances * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
return getBooleanQuery((List) clauses, disableCoord);
}
/**
* Factory method for generating query, given a set of clauses.
* By default creates a boolean query composed of clauses passed in.
*
* Can be overridden by extending classes, to modify query being
* returned.
*
* @param clauses List that contains {@link BooleanClause} instances
* to join. * to join.
* @param disableCoord true if coord scoring should be disabled. * @param disableCoord true if coord scoring should be disabled.
* *
* @return Resulting {@link Query} object. * @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow * @exception ParseException throw in overridden method to disallow
*/ */
protected Query getBooleanQuery(Vector clauses, boolean disableCoord) protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException throws ParseException
{ {
if (clauses.size()==0) { if (clauses.size()==0) {
@ -796,7 +841,7 @@ public class QueryParser {
} }
BooleanQuery query = newBooleanQuery(disableCoord); BooleanQuery query = newBooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) { for (int i = 0; i < clauses.size(); i++) {
query.add((BooleanClause)clauses.elementAt(i)); query.add((BooleanClause)clauses.get(i));
} }
return query; return query;
} }
@ -870,7 +915,6 @@ public class QueryParser {
return newPrefixQuery(t); return newPrefixQuery(t);
} }
/** /**
* Factory method for generating a query (similar to * Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses * {@link #getWildcardQuery}). Called when parser parses
@ -896,7 +940,7 @@ public class QueryParser {
* removed, or kept only once if there was a double escape. * removed, or kept only once if there was a double escape.
* *
* Supports escaped unicode characters, e. g. translates * Supports escaped unicode characters, e. g. translates
* <code>\u0041</code> to <code>A</code>. * <code>\\u0041</code> to <code>A</code>.
* *
*/ */
private String discardEscapeChar(String input) throws ParseException { private String discardEscapeChar(String input) throws ParseException {
@ -1108,7 +1152,7 @@ Query TopLevelQuery(String field) :
Query Query(String field) : Query Query(String field) :
{ {
Vector clauses = new Vector(); List clauses = new ArrayList();
Query q, firstQuery=null; Query q, firstQuery=null;
int conj, mods; int conj, mods;
} }

View File

@ -19,7 +19,6 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import java.util.Set; import java.util.Set;
import java.util.Vector;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;

View File

@ -24,7 +24,8 @@ import java.io.IOException;
import java.io.RandomAccessFile; import java.io.RandomAccessFile;
import java.security.MessageDigest; import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException; import java.security.NoSuchAlgorithmException;
import java.util.Hashtable; import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.IndexFileNameFilter; import org.apache.lucene.index.IndexFileNameFilter;
@ -58,7 +59,7 @@ public class FSDirectory extends Directory {
* instance from the cache. See LUCENE-776 * instance from the cache. See LUCENE-776
* for some relevant discussion. * for some relevant discussion.
*/ */
private static final Hashtable DIRECTORIES = new Hashtable(); private static final Map DIRECTORIES = new HashMap();
private static boolean disableLocks = false; private static boolean disableLocks = false;

View File

@ -186,7 +186,7 @@ public class TestDoc extends LuceneTestCase {
merger.closeReaders(); merger.closeReaders();
if (useCompoundFile) { if (useCompoundFile) {
Vector filesToDelete = merger.createCompoundFile(merged + ".cfs"); List filesToDelete = merger.createCompoundFile(merged + ".cfs");
for (Iterator iter = filesToDelete.iterator(); iter.hasNext();) for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
directory.deleteFile((String) iter.next()); directory.deleteFile((String) iter.next());
} }

View File

@ -19,8 +19,10 @@ package org.apache.lucene.store;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.Enumeration; import java.util.Collections;
import java.util.Hashtable; import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
@ -60,8 +62,8 @@ public class TestLockFactory extends LuceneTestCase {
assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)", assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
lf.makeLockCount >= 1); lf.makeLockCount >= 1);
for(Enumeration e = lf.locksCreated.keys(); e.hasMoreElements();) { for(Iterator e = lf.locksCreated.keySet().iterator(); e.hasNext();) {
String lockName = (String) e.nextElement(); String lockName = (String) e.next();
MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName); MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)", assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
lock.lockAttempts > 0); lock.lockAttempts > 0);
@ -522,7 +524,7 @@ public class TestLockFactory extends LuceneTestCase {
public class MockLockFactory extends LockFactory { public class MockLockFactory extends LockFactory {
public boolean lockPrefixSet; public boolean lockPrefixSet;
public Hashtable locksCreated = new Hashtable(); public Map locksCreated = Collections.synchronizedMap(new HashMap());
public int makeLockCount = 0; public int makeLockCount = 0;
public void setLockPrefix(String lockPrefix) { public void setLockPrefix(String lockPrefix) {