mirror of https://github.com/apache/lucene.git

LUCENE-1257: Generics: demo, contrib/swing, contrib/wikipedia, contrib/wordnet, contrib/xml-query-parser

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@834414 13f79535-47bb-0310-9956-ffa450edef68

parent a7f15d1589
commit b756112999
@@ -51,7 +51,7 @@ public class ListSearcher extends AbstractListModel {
      * The reference links between the decorated ListModel
      * and this list model based on search criteria
      */
-    private ArrayList rowToModelIndex = new ArrayList();
+    private ArrayList<Integer> rowToModelIndex = new ArrayList<Integer>();
 
     /**
      * In memory lucene index
@@ -256,12 +256,12 @@ public class ListSearcher extends AbstractListModel {
         searchString = null;
         rowToModelIndex.clear();
         for (int t=0; t<listModel.getSize(); t++){
-            rowToModelIndex.add(Integer.valueOf(t));
+            rowToModelIndex.add(t);
         }
     }
 
     private int getModelRow(int row){
-        return ((Integer) rowToModelIndex.get(row)).intValue();
+        return rowToModelIndex.get(row);
     }
 
     public int getSize() {
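The `Integer.valueOf(t)` → `add(t)` and `((Integer) …get(row)).intValue()` → `get(row)` edits above lean on Java 5 autoboxing against the now-parameterized `ArrayList<Integer>`. A minimal standalone sketch of the same pattern (class and variable names are illustrative, not from the Lucene sources):

```java
import java.util.ArrayList;
import java.util.List;

public class AutoboxingDemo {
    public static void main(String[] args) {
        List<Integer> rowToModelIndex = new ArrayList<Integer>();
        for (int t = 0; t < 5; t++) {
            rowToModelIndex.add(t);            // the int is boxed to Integer automatically
        }
        int modelRow = rowToModelIndex.get(2); // the Integer is unboxed back to int
        System.out.println(modelRow);          // prints 2
    }
}
```

One caveat: auto-unboxing a null `Integer` throws a `NullPointerException`, which matters whenever a list or map can hold nulls.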
@@ -82,7 +82,7 @@ public class TableSearcher extends AbstractTableModel {
      * these keeps reference to the decorated table model for data
      * only rows that match the search criteria are linked
      */
-    private ArrayList rowToModelIndex = new ArrayList();
+    private ArrayList<Integer> rowToModelIndex = new ArrayList<Integer>();
 
 
     //Lucene stuff.
@@ -285,7 +285,7 @@ public class TableSearcher extends AbstractTableModel {
     }
 
     private int getModelRow(int row){
-        return ((Integer) rowToModelIndex.get(row)).intValue();
+        return rowToModelIndex.get(row);
     }
 
     /**
@@ -297,7 +297,7 @@ public class TableSearcher extends AbstractTableModel {
         searchString = null;
         rowToModelIndex.clear();
         for (int t=0; t<tableModel.getRowCount(); t++){
-            rowToModelIndex.add(Integer.valueOf(t));
+            rowToModelIndex.add(t);
         }
     }
 
@@ -316,7 +316,7 @@ public class TableSearcher extends AbstractTableModel {
     }
 
     @Override
-    public Class getColumnClass(int column) {
+    public Class<?> getColumnClass(int column) {
         return tableModel.getColumnClass(column);
     }
 
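The `getColumnClass` change swaps the raw type `Class` for the wildcard `Class<?>`, matching the generified signature in `javax.swing.table.TableModel` and silencing the raw-type warning without committing to any specific type parameter. A hedged sketch of the same delegating override (the wrapper class here is illustrative, not the contrib/swing source):

```java
import javax.swing.table.AbstractTableModel;
import javax.swing.table.TableModel;

class DelegatingTableModel extends AbstractTableModel {
    private final TableModel tableModel;

    DelegatingTableModel(TableModel tableModel) { this.tableModel = tableModel; }

    @Override
    public Class<?> getColumnClass(int column) {
        // Class<?> means "a Class of some unknown type": it matches the
        // generified TableModel signature without naming a type parameter.
        return tableModel.getColumnClass(column);
    }

    @Override public int getRowCount() { return tableModel.getRowCount(); }
    @Override public int getColumnCount() { return tableModel.getColumnCount(); }
    @Override public Object getValueAt(int row, int col) { return tableModel.getValueAt(row, col); }
}
```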
@@ -115,8 +115,8 @@ public final class WikipediaTokenizer extends Tokenizer {
   private final WikipediaTokenizerImpl scanner;
 
   private int tokenOutput = TOKENS_ONLY;
-  private Set untokenizedTypes = Collections.EMPTY_SET;
-  private Iterator tokens = null;
+  private Set<String> untokenizedTypes = Collections.emptySet();
+  private Iterator<AttributeSource.State> tokens = null;
 
   private OffsetAttribute offsetAtt;
   private TypeAttribute typeAtt;
@@ -131,7 +131,7 @@ public final class WikipediaTokenizer extends Tokenizer {
    * @param input The Input Reader
    */
   public WikipediaTokenizer(Reader input) {
-    this(input, TOKENS_ONLY, Collections.EMPTY_SET);
+    this(input, TOKENS_ONLY, Collections.<String>emptySet());
   }
 
   /**
@@ -142,7 +142,7 @@ public final class WikipediaTokenizer extends Tokenizer {
    * @param tokenOutput One of {@link #TOKENS_ONLY}, {@link #UNTOKENIZED_ONLY}, {@link #BOTH}
    * @param untokenizedTypes
    */
-  public WikipediaTokenizer(Reader input, int tokenOutput, Set untokenizedTypes) {
+  public WikipediaTokenizer(Reader input, int tokenOutput, Set<String> untokenizedTypes) {
     super(input);
     this.scanner = new WikipediaTokenizerImpl(input);
     init(tokenOutput, untokenizedTypes);
@@ -156,7 +156,7 @@ public final class WikipediaTokenizer extends Tokenizer {
    * @param tokenOutput One of {@link #TOKENS_ONLY}, {@link #UNTOKENIZED_ONLY}, {@link #BOTH}
    * @param untokenizedTypes
    */
-  public WikipediaTokenizer(AttributeFactory factory, Reader input, int tokenOutput, Set untokenizedTypes) {
+  public WikipediaTokenizer(AttributeFactory factory, Reader input, int tokenOutput, Set<String> untokenizedTypes) {
     super(factory, input);
     this.scanner = new WikipediaTokenizerImpl(input);
     init(tokenOutput, untokenizedTypes);
@@ -170,13 +170,13 @@ public final class WikipediaTokenizer extends Tokenizer {
    * @param tokenOutput One of {@link #TOKENS_ONLY}, {@link #UNTOKENIZED_ONLY}, {@link #BOTH}
    * @param untokenizedTypes
    */
-  public WikipediaTokenizer(AttributeSource source, Reader input, int tokenOutput, Set untokenizedTypes) {
+  public WikipediaTokenizer(AttributeSource source, Reader input, int tokenOutput, Set<String> untokenizedTypes) {
     super(source, input);
     this.scanner = new WikipediaTokenizerImpl(input);
     init(tokenOutput, untokenizedTypes);
   }
 
-  private void init(int tokenOutput, Set untokenizedTypes) {
+  private void init(int tokenOutput, Set<String> untokenizedTypes) {
     this.tokenOutput = tokenOutput;
     this.untokenizedTypes = untokenizedTypes;
     this.offsetAtt = addAttribute(OffsetAttribute.class);
@@ -194,7 +194,7 @@ public final class WikipediaTokenizer extends Tokenizer {
   @Override
   public final boolean incrementToken() throws IOException {
     if (tokens != null && tokens.hasNext()){
-      AttributeSource.State state = (AttributeSource.State) tokens.next();
+      AttributeSource.State state = tokens.next();
       restoreState(state);
       return true;
     }
@@ -230,7 +230,7 @@ public final class WikipediaTokenizer extends Tokenizer {
     int lastPos = theStart + numAdded;
     int tmpTokType;
     int numSeen = 0;
-    List tmp = new ArrayList();
+    List<AttributeSource.State> tmp = new ArrayList<AttributeSource.State>();
     setupSavedToken(0, type);
     tmp.add(captureState());
     //while we can get a token and that token is the same type and we have not transitioned to a new wiki-item of the same type
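Two different replacements for `Collections.EMPTY_SET` appear above, and the distinction is worth spelling out. `EMPTY_SET` is a raw `Set`, so assigning it to `Set<String>` is an unchecked conversion. The generic `Collections.emptySet()` fixes that: in an assignment the compiler infers the type parameter from the target, but in an argument position (under Java 5/6 inference rules) an explicit type witness `Collections.<String>emptySet()` is required, which is why the constructor call spells it out. A small self-contained sketch:

```java
import java.util.Collections;
import java.util.Set;

public class EmptySetDemo {
    static void tokenize(Set<String> untokenizedTypes) {
        System.out.println("types: " + untokenizedTypes);
    }

    public static void main(String[] args) {
        // Inference from the assignment target works here:
        Set<String> viaAssignment = Collections.emptySet();

        // An argument position in Java 5/6 needs an explicit type witness:
        tokenize(Collections.<String>emptySet());

        tokenize(viaAssignment);
    }
}
```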
@@ -110,8 +110,8 @@ public final class SynExpand {
                            final float boost)
     throws IOException
   {
-    final Set already = new HashSet(); // avoid dups
-    List top = new LinkedList(); // needs to be separately listed..
+    final Set<String> already = new HashSet<String>(); // avoid dups
+    List<String> top = new LinkedList<String>(); // needs to be separately listed..
     final String field = ( f == null) ? "contents" : f;
     if ( a == null) a = new StandardAnalyzer(Version.LUCENE_CURRENT);
 
@@ -127,7 +127,7 @@ public final class SynExpand {
     final BooleanQuery tmp = new BooleanQuery();
 
     // [2] form query
-    Iterator it = top.iterator();
+    Iterator<String> it = top.iterator();
     while ( it.hasNext())
     {
       // [2a] add to level words in
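Once `top` is a `List<String>`, typing the `Iterator` removes the downstream `(String)` cast; the commit stops there, but the same loop could go one step further to the Java 5 for-each form. A hedged sketch of the equivalent "[2] form query" loop, extracted into a helper (the method is illustrative; the query classes mirror the 2.x/3.x-era Lucene API shown in this diff):

```java
import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

class FormQuery {
    // Same behavior as the iterator loop above, with the explicit
    // Iterator replaced by a for-each over the typed list.
    static BooleanQuery formQuery(String field, List<String> top) {
        BooleanQuery tmp = new BooleanQuery();
        for (String word : top) {
            tmp.add(new TermQuery(new Term(field, word)), BooleanClause.Occur.SHOULD);
        }
        return tmp;
    }
}
```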
@@ -120,8 +120,8 @@ public class SynLookup {
                             final float boost)
     throws IOException
   {
-    final Set already = new HashSet(); // avoid dups
-    List top = new LinkedList(); // needs to be separately listed..
+    final Set<String> already = new HashSet<String>(); // avoid dups
+    List<String> top = new LinkedList<String>(); // needs to be separately listed..
 
     // [1] Parse query into separate words so that when we expand we can avoid dups
     TokenStream ts = a.tokenStream( field, new StringReader( query));
@@ -135,11 +135,11 @@ public class SynLookup {
     final BooleanQuery tmp = new BooleanQuery();
 
     // [2] form query
-    Iterator it = top.iterator();
+    Iterator<String> it = top.iterator();
     while ( it.hasNext())
     {
       // [2a] add to level words in
-      String word = (String) it.next();
+      String word = it.next();
       TermQuery tq = new TermQuery( new Term( field, word));
       tmp.add( tq, BooleanClause.Occur.SHOULD);
 
@@ -106,11 +106,8 @@ public class SynonymMap {
    * <code>Character.isLetter()</code>.
    */
   public String[] getSynonyms(String word) {
-    Object syns = table.get(word);
-    if (syns == null) return EMPTY;
-    if (syns instanceof String) return new String[] {(String) syns};
-
-    String[] synonyms = (String[]) syns;
+    String[] synonyms = table.get(word);
+    if (synonyms == null) return EMPTY;
     String[] copy = new String[synonyms.length]; // copy for guaranteed immutability
     System.arraycopy(synonyms, 0, copy, 0, synonyms.length);
     return copy;
@@ -261,7 +258,7 @@ public class SynonymMap {
 
 
     /* Part D: compute index data structure */
-    HashMap word2Syns = createIndex(word2Groups, group2Words);
+    HashMap<String,String[]> word2Syns = createIndex(word2Groups, group2Words);
 
     /* Part E: minimize memory consumption by a factor 3 (or so) */
     // if (true) return word2Syns;
@@ -308,7 +305,7 @@ public class SynonymMap {
     return word2Syns;
   }
 
-  private HashMap<String,String[]> optimize(HashMap word2Syns, HashMap<String,String> internedWords) {
+  private HashMap<String,String[]> optimize(HashMap<String,String[]> word2Syns, HashMap<String,String> internedWords) {
     if (DEBUG) {
       System.err.println("before gc");
       for (int i=0; i < 10; i++) System.gc();
@@ -347,10 +344,8 @@ public class SynonymMap {
       for (int k=syns.length; --k >= 0; ) {
         syns[k] = internedWords.get(syns[k]);
       }
-      Object replacement = syns;
-      if (syns.length == 1) replacement = syns[0]; // minimize memory consumption some more
       word2Syns.remove(words[j]);
-      word2Syns.put(internedWords.get(words[j]), replacement);
+      word2Syns.put(internedWords.get(words[j]), syns);
     }
 
     if (DEBUG) {
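Note what the generified `SynonymMap` gives up: the raw table could store a bare `String` for one-synonym words as a memory optimization, but a `HashMap<String,String[]>` forces a uniform `String[]` value type, so both the `instanceof String` branch in `getSynonyms` and the `replacement` trick in `optimize` go away. What survives is the defensive copy, sketched standalone below (the `table` and `EMPTY` names follow the diff; everything else is illustrative):

```java
import java.util.HashMap;

class SynonymTableSketch {
    private static final String[] EMPTY = new String[0];
    private final HashMap<String,String[]> table = new HashMap<String,String[]>();

    // Returns a defensive copy so callers cannot mutate the shared table entry.
    public String[] getSynonyms(String word) {
        String[] synonyms = table.get(word);
        if (synonyms == null) return EMPTY;
        String[] copy = new String[synonyms.length];
        System.arraycopy(synonyms, 0, copy, 0, synonyms.length);
        return copy;
    }
}
```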
@@ -131,9 +131,9 @@ public class Syns2Index
         String line;
 
         // maps a word to all the "groups" it's in
-        final Map word2Nums = new TreeMap();
+        final Map<String,List<String>> word2Nums = new TreeMap<String,List<String>>();
         // maps a group to all the words in it
-        final Map num2Words = new TreeMap();
+        final Map<String,List<String>> num2Words = new TreeMap<String,List<String>>();
         // number of rejected words
         int ndecent = 0;
 
@@ -177,10 +177,10 @@ public class Syns2Index
 
                     // 1/2: word2Nums map
                     // append to entry or add new one
-                    List lis =(List) word2Nums.get(word);
+                    List<String> lis = word2Nums.get(word);
                     if (lis == null)
                     {
-                        lis = new LinkedList();
+                        lis = new LinkedList<String>();
                         lis.add(num);
                         word2Nums.put(word, lis);
                     }
@@ -188,10 +188,10 @@ public class Syns2Index
                         lis.add(num);
 
                     // 2/2: num2Words map
-                    lis = (List) num2Words.get(num);
+                    lis = num2Words.get(num);
                     if (lis == null)
                     {
-                        lis = new LinkedList();
+                        lis = new LinkedList<String>();
                         lis.add(word);
                         num2Words.put(num, lis);
                     }
@@ -236,7 +236,7 @@ public class Syns2Index
      * @param word2Nums
      * @param num2Words
      */
-    private static void index(String indexDir, Map word2Nums, Map num2Words)
+    private static void index(String indexDir, Map<String,List<String>> word2Nums, Map<String,List<String>> num2Words)
        throws Throwable
     {
         int row = 0;
@@ -247,10 +247,10 @@ public class Syns2Index
         // override the specific index if it already exists
         IndexWriter writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
         writer.setUseCompoundFile(true); // why?
-        Iterator i1 = word2Nums.keySet().iterator();
+        Iterator<String> i1 = word2Nums.keySet().iterator();
         while (i1.hasNext()) // for each word
         {
-            String g = (String) i1.next();
+            String g = i1.next();
             Document doc = new Document();
 
             int n = index(word2Nums, num2Words, g, doc);
@@ -276,25 +276,25 @@ public class Syns2Index
     /**
      * Given the 2 maps fills a document for 1 word.
     */
-    private static int index(Map word2Nums, Map num2Words, String g, Document doc)
+    private static int index(Map<String,List<String>> word2Nums, Map<String,List<String>> num2Words, String g, Document doc)
        throws Throwable
     {
-        List keys = (List) word2Nums.get(g); // get list of key#'s
-        Iterator i2 = keys.iterator();
+        List<String> keys = word2Nums.get(g); // get list of key#'s
+        Iterator<String> i2 = keys.iterator();
 
-        Set already = new TreeSet(); // keep them sorted
+        Set<String> already = new TreeSet<String>(); // keep them sorted
 
         // pass 1: fill up 'already' with all words
         while (i2.hasNext()) // for each key#
         {
-            already.addAll((List) num2Words.get(i2.next())); // get list of words
+            already.addAll(num2Words.get(i2.next())); // get list of words
         }
         int num = 0;
         already.remove(g); // of course a word is it's own syn
-        Iterator it = already.iterator();
+        Iterator<String> it = already.iterator();
         while (it.hasNext())
         {
-            String cur = (String) it.next();
+            String cur = it.next();
             // don't store things like 'pit bull' -> 'american pit bull'
             if (!isDecent(cur))
             {
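`Map<String,List<String>>` with a get-or-create branch, used for both `word2Nums` and `num2Words` above, is the classic hand-rolled multimap that the typed code now expresses without any `(List)` casts. A standalone sketch of the pattern (names are illustrative):

```java
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class MultimapDemo {
    public static void main(String[] args) {
        Map<String,List<String>> word2Nums = new TreeMap<String,List<String>>();
        put(word2Nums, "dog", "n1");
        put(word2Nums, "dog", "n2");
        System.out.println(word2Nums); // prints {dog=[n1, n2]}
    }

    static void put(Map<String,List<String>> map, String key, String value) {
        List<String> lis = map.get(key);  // typed get: no (List) cast needed
        if (lis == null) {
            lis = new LinkedList<String>();
            map.put(key, lis);
        }
        lis.add(value);
    }
}
```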
@@ -29,10 +29,10 @@ import org.w3c.dom.Element;
  */
 public class FilterBuilderFactory implements FilterBuilder {
 
-	HashMap builders=new HashMap();
+	HashMap<String,FilterBuilder> builders=new HashMap<String,FilterBuilder>();
 
 	public Filter getFilter(Element n) throws ParserException {
-		FilterBuilder builder=(FilterBuilder) builders.get(n.getNodeName());
+		FilterBuilder builder= builders.get(n.getNodeName());
 		if(builder==null)
 		{
 			throw new ParserException("No FilterBuilder defined for node "+n.getNodeName());
@@ -45,6 +45,6 @@ public class FilterBuilderFactory implements FilterBuilder {
 	}
 	public FilterBuilder getFilterBuilder(String nodeName)
 	{
-		return (FilterBuilder) builders.get(nodeName);
+		return builders.get(nodeName);
 	}
 }
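Typing the registry as `HashMap<String,FilterBuilder>` moves the cast from every lookup site into the one map declaration; the same change repeats below for `QueryBuilderFactory` and `SpanQueryBuilderFactory`. The pattern in miniature (the interface and names here are illustrative, not the xml-query-parser API):

```java
import java.util.HashMap;

interface Builder { String build(); }

class BuilderRegistry {
    private final HashMap<String,Builder> builders = new HashMap<String,Builder>();

    void addBuilder(String nodeName, Builder b) { builders.put(nodeName, b); }

    Builder getBuilder(String nodeName) {
        Builder builder = builders.get(nodeName); // typed lookup: no cast
        if (builder == null) {
            throw new IllegalArgumentException("No Builder defined for node " + nodeName);
        }
        return builder;
    }
}
```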
@@ -29,10 +29,10 @@ import org.w3c.dom.Element;
  */
 public class QueryBuilderFactory implements QueryBuilder {
 
-	HashMap builders=new HashMap();
+	HashMap<String,QueryBuilder> builders=new HashMap<String,QueryBuilder>();
 
 	public Query getQuery(Element n) throws ParserException {
-		QueryBuilder builder=(QueryBuilder) builders.get(n.getNodeName());
+		QueryBuilder builder= builders.get(n.getNodeName());
 		if(builder==null)
 		{
 			throw new ParserException("No QueryObjectBuilder defined for node "+n.getNodeName());
@@ -45,7 +45,7 @@ public class QueryBuilderFactory implements QueryBuilder {
 	}
 	public QueryBuilder getQueryBuilder(String nodeName)
 	{
-		return (QueryBuilder) builders.get(nodeName);
+		return builders.get(nodeName);
 	}
 
 }
@@ -55,7 +55,7 @@ public class QueryTemplateManager
 	static DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance ();
 	static TransformerFactory tFactory = TransformerFactory.newInstance();
 
-	HashMap compiledTemplatesCache=new HashMap();
+	HashMap<String,Templates> compiledTemplatesCache=new HashMap<String,Templates>();
 	Templates defaultCompiledTemplates=null;
 
 
@@ -77,13 +77,13 @@ public class QueryTemplateManager
 	}
 	public String getQueryAsXmlString(Properties formProperties,String queryTemplateName) throws SAXException, IOException, ParserConfigurationException, TransformerException
 	{
-		Templates ts=(Templates) compiledTemplatesCache.get(queryTemplateName);
+		Templates ts= compiledTemplatesCache.get(queryTemplateName);
 		return getQueryAsXmlString(formProperties, ts);
 	}
 
 	public Document getQueryAsDOM(Properties formProperties,String queryTemplateName) throws SAXException, IOException, ParserConfigurationException, TransformerException
 	{
-		Templates ts=(Templates) compiledTemplatesCache.get(queryTemplateName);
+		Templates ts= compiledTemplatesCache.get(queryTemplateName);
 		return getQueryAsDOM(formProperties, ts);
 	}
 	public String getQueryAsXmlString(Properties formProperties) throws SAXException, IOException, ParserConfigurationException, TransformerException
@@ -53,7 +53,7 @@ public class CachedFilterBuilder implements FilterBuilder {
 	private QueryBuilderFactory queryFactory;
 	private FilterBuilderFactory filterFactory;
 
-	private LRUCache filterCache = null;
+	private LRUCache<Object,Filter> filterCache = null;
 
 	private int cacheSize;
 
@@ -72,7 +72,7 @@ public class CachedFilterBuilder implements FilterBuilder {
 
 		if (filterCache == null)
 		{
-			filterCache = new LRUCache(cacheSize);
+			filterCache = new LRUCache<Object,Filter>(cacheSize);
 		}
 
 		// Test to see if child Element is a query or filter that needs to be
@@ -90,7 +90,7 @@ public class CachedFilterBuilder implements FilterBuilder {
 			f = filterFactory.getFilter(childElement);
 			cacheKey = f;
 		}
-		Filter cachedFilter = (Filter) filterCache.get(cacheKey);
+		Filter cachedFilter = filterCache.get(cacheKey);
 		if (cachedFilter != null)
 		{
 			return cachedFilter; // cache hit
@@ -109,7 +109,7 @@ public class CachedFilterBuilder implements FilterBuilder {
 		return cachedFilter;
 	}
 
-	static class LRUCache extends java.util.LinkedHashMap
+	static class LRUCache<K,V> extends java.util.LinkedHashMap<K,V>
 	{
 		public LRUCache(int maxsize)
 		{
@@ -120,7 +120,7 @@ public class CachedFilterBuilder implements FilterBuilder {
 		protected int maxsize;
 
 		@Override
-		protected boolean removeEldestEntry(Entry eldest)
+		protected boolean removeEldestEntry(Entry<K,V> eldest)
 		{
 			return size() > maxsize;
 		}
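The generified `LRUCache<K,V>` is the standard `LinkedHashMap` LRU idiom: a capped size enforced by overriding `removeEldestEntry`. The diff omits the constructor body, so the access-order flag in the sketch below is an assumption; with `accessOrder=false` the same code would evict in insertion order instead. A self-contained version of the idiom:

```java
import java.util.LinkedHashMap;
import java.util.Map;

class LRUCache<K,V> extends LinkedHashMap<K,V> {
    private final int maxsize;

    public LRUCache(int maxsize) {
        // initial capacity, load factor, accessOrder=true: iteration runs from
        // least- to most-recently accessed, which is what LRU eviction needs.
        // (Assumed here; the commit does not show the original constructor.)
        super(16, 0.75f, true);
        this.maxsize = maxsize;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
        return size() > maxsize; // evict the eldest entry once over capacity
    }
}

class LRUCacheDemo {
    public static void main(String[] args) {
        LRUCache<String,Integer> cache = new LRUCache<String,Integer>(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");      // touch "a" so "b" becomes the eldest entry
        cache.put("c", 3);   // evicts "b"
        System.out.println(cache.keySet()); // prints [a, c]
    }
}
```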
@@ -70,10 +70,10 @@ public class LikeThisQueryBuilder implements QueryBuilder {
 		//TODO MoreLikeThis needs to ideally have per-field stopWords lists - until then
 		//I use all analyzers/fields to generate multi-field compatible stop list
 		String stopWords=e.getAttribute("stopWords");
-		Set stopWordsSet=null;
+		Set<String> stopWordsSet=null;
 		if((stopWords!=null)&&(fields!=null))
 		{
-			stopWordsSet=new HashSet();
+			stopWordsSet=new HashSet<String>();
 			for (int i = 0; i < fields.length; i++)
 			{
 				TokenStream ts = analyzer.tokenStream(fields[i],new StringReader(stopWords));
@@ -41,7 +41,7 @@ public class SpanNearBuilder extends SpanBuilderBase
 		String slopString=DOMUtils.getAttributeOrFail(e,"slop");
 		int slop=Integer.parseInt(slopString);
 		boolean inOrder=DOMUtils.getAttribute(e,"inOrder",false);
-		ArrayList spans=new ArrayList();
+		ArrayList<SpanQuery> spans=new ArrayList<SpanQuery>();
 		for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling())
 		{
 			if (kid.getNodeType() == Node.ELEMENT_NODE)
@@ -49,7 +49,7 @@ public class SpanNearBuilder extends SpanBuilderBase
 				spans.add(factory.getSpanQuery((Element) kid));
 			}
 		}
-		SpanQuery[] spanQueries=(SpanQuery[]) spans.toArray(new SpanQuery[spans.size()]);
+		SpanQuery[] spanQueries= spans.toArray(new SpanQuery[spans.size()]);
 		SpanNearQuery snq=new SpanNearQuery(spanQueries,slop,inOrder);
 		return snq;
 	}
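With `ArrayList<SpanQuery>`, the `(SpanQuery[])` cast becomes dead weight: the generic signature `<T> T[] toArray(T[] a)` lets the argument's type fix the result type. The same cast removal recurs in `SpanOrBuilder` and `SpanOrTermsBuilder` below. In miniature:

```java
import java.util.ArrayList;
import java.util.List;

public class ToArrayDemo {
    public static void main(String[] args) {
        List<String> clauses = new ArrayList<String>();
        clauses.add("alpha");
        clauses.add("beta");

        // <T> T[] toArray(T[] a): the argument array's type determines the
        // return type, so no cast is needed on a parameterized list.
        String[] arr = clauses.toArray(new String[clauses.size()]);
        System.out.println(arr.length); // prints 2
    }
}
```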
@@ -41,7 +41,7 @@ public class SpanOrBuilder extends SpanBuilderBase
 
 	public SpanQuery getSpanQuery(Element e) throws ParserException
 	{
-		ArrayList clausesList=new ArrayList();
+		ArrayList<SpanQuery> clausesList=new ArrayList<SpanQuery>();
 		for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling())
 		{
 			if (kid.getNodeType() == Node.ELEMENT_NODE)
@@ -50,7 +50,7 @@ public class SpanOrBuilder extends SpanBuilderBase
 				clausesList.add(clause);
 			}
 		}
-		SpanQuery[] clauses=(SpanQuery[]) clausesList.toArray(new SpanQuery[clausesList.size()]);
+		SpanQuery[] clauses= clausesList.toArray(new SpanQuery[clausesList.size()]);
 		SpanOrQuery soq = new SpanOrQuery(clauses);
 		soq.setBoost(DOMUtils.getAttribute(e,"boost",1.0f));
 		return soq;
@@ -54,7 +54,7 @@ public class SpanOrTermsBuilder extends SpanBuilderBase
 
 		try
 		{
-			ArrayList clausesList=new ArrayList();
+			ArrayList<SpanQuery> clausesList=new ArrayList<SpanQuery>();
 			TokenStream ts=analyzer.tokenStream(fieldName,new StringReader(value));
 			TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
 
@@ -62,7 +62,7 @@ public class SpanOrTermsBuilder extends SpanBuilderBase
 				SpanTermQuery stq=new SpanTermQuery(new Term(fieldName, termAtt.term()));
 				clausesList.add(stq);
 			}
-			SpanOrQuery soq=new SpanOrQuery((SpanQuery[]) clausesList.toArray(new SpanQuery[clausesList.size()]));
+			SpanOrQuery soq=new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()]));
 			soq.setBoost(DOMUtils.getAttribute(e,"boost",1.0f));
 			return soq;
 		}
@@ -28,7 +28,7 @@ import org.w3c.dom.Element;
  */
 public class SpanQueryBuilderFactory implements SpanQueryBuilder {
 
-	HashMap builders=new HashMap();
+	HashMap<String,SpanQueryBuilder> builders=new HashMap<String,SpanQueryBuilder>();
 
 	public Query getQuery(Element e) throws ParserException {
 		return getSpanQuery(e);
@@ -39,7 +39,7 @@ public class SpanQueryBuilderFactory implements SpanQueryBuilder {
 	}
 	public SpanQuery getSpanQuery(Element e) throws ParserException
 	{
-		SpanQueryBuilder builder=(SpanQueryBuilder) builders.get(e.getNodeName());
+		SpanQueryBuilder builder= builders.get(e.getNodeName());
 		if(builder==null)
 		{
 			throw new ParserException("No SpanQueryObjectBuilder defined for node "+e.getNodeName());
@@ -21,7 +21,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 public class Entities {
-  static final Map decoder = new HashMap(300);
+  static final Map<String,String> decoder = new HashMap<String,String>(300);
   static final String[] encoder = new String[0x100];
 
   static final String decode(String entity) {
@@ -27,7 +27,7 @@ public final class Tags {
   /**
    * contains all tags for which whitespaces have to be inserted for proper tokenization
    */
-  public static final Set WS_ELEMS = Collections.synchronizedSet(new HashSet());
+  public static final Set<String> WS_ELEMS = Collections.synchronizedSet(new HashSet<String>());
 
   static{
     WS_ELEMS.add("<hr");