LUCENE-2065: use generics throughout unit tests

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@887181 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2009-12-04 13:07:47 +00:00
parent a0bf23d762
commit d1d9781458
92 changed files with 433 additions and 553 deletions
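Every file below follows the same pattern: raw collection types become parameterized ones, the casts they forced disappear, and explicit Iterator loops that only walk a collection become enhanced for loops. A minimal, self-contained sketch of that before/after, using illustrative names (GenericsMigrationSketch, firstRaw, firstTyped) that do not come from any of the changed test files:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class GenericsMigrationSketch {

  // Before: a raw List says nothing about its elements, so callers cast
  // and the compiler can only warn about unchecked operations.
  static String firstRaw(List names) {
    Iterator it = names.iterator();
    return (String) it.next();
  }

  // After: the element type is declared once, the cast disappears, and
  // the enhanced for loop replaces the hand-written Iterator loop.
  static String firstTyped(List<String> names) {
    for (String name : names) {
      return name;
    }
    return null;
  }

  public static void main(String[] args) {
    List<String> names = new ArrayList<String>();  // pre-Java-7 style, matching the diffs
    names.add("lucene");
    System.out.println(firstRaw(names));
    System.out.println(firstTyped(names));
  }
}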


@ -66,6 +66,9 @@ Test Cases
* LUCENE-1844: Speed up the unit tests (Mark Miller, Erick Erickson,
Mike McCandless)
* LUCENE-2065: Use Java 5 generics throughout our unit tests. (Kay
Kay via Mike McCandless)
======================= Release 3.0.0 2009-11-25 =======================
Changes in backwards compatibility policy


@ -110,7 +110,7 @@ public class PositionBasedTermVectorMapper extends TermVectorMapper{
*
* @return A map between field names and a Map. The sub-Map key is the position as the integer, the value is {@link org.apache.lucene.index.PositionBasedTermVectorMapper.TVPositionInfo}.
*/
public Map<String, Map<Integer, TVPositionInfo>> getFieldToTerms() {
public Map<String,Map<Integer,TVPositionInfo>> getFieldToTerms() {
return fieldToTerms;
}


@ -35,7 +35,7 @@ public class CachingWrapperFilter extends Filter {
/**
* A transient Filter cache (package private because of test)
*/
transient Map<IndexReader, DocIdSet> cache;
transient Map<IndexReader,DocIdSet> cache;
private final ReentrantLock lock = new ReentrantLock();


@ -51,7 +51,7 @@ public abstract class AttributeImpl implements Cloneable, Serializable, Attribut
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
Class clazz = this.getClass();
Class<?> clazz = this.getClass();
Field[] fields = clazz.getDeclaredFields();
try {
for (int i = 0; i < fields.length; i++) {


@ -212,7 +212,7 @@ public final class FieldCacheSanityChecker {
if (seen.contains(rf)) continue;
List kids = getAllDecendentReaderKeys(rf.readerKey);
List<Object> kids = getAllDecendentReaderKeys(rf.readerKey);
for (Object kidKey : kids) {
ReaderField kid = new ReaderField(kidKey, rf.fieldName);
@ -270,7 +270,7 @@ public final class FieldCacheSanityChecker {
* the hierarchy of subReaders building up a list of the objects
* returned by obj.getFieldCacheKey()
*/
private List getAllDecendentReaderKeys(Object seed) {
private List<Object> getAllDecendentReaderKeys(Object seed) {
List<Object> all = new ArrayList<Object>(17); // will grow as we iter
all.add(seed);
for (int i = 0; i < all.size(); i++) {


@ -33,7 +33,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
/**
* A very simple demo used in the API documentation (src/java/overview.html).


@ -30,7 +30,6 @@ import org.apache.lucene.queryParser.*;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
import junit.framework.TestSuite;
import junit.textui.TestRunner;


@ -19,7 +19,6 @@ package org.apache.lucene;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.Collection;
import java.io.File;
import java.io.IOException;
@ -79,7 +78,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
writer.commit();
}
}
IndexCommit cp = (IndexCommit) dp.snapshot();
IndexCommit cp = dp.snapshot();
copyFiles(dir, cp);
writer.close();
copyFiles(dir, cp);
@ -181,7 +180,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
public void backupIndex(Directory dir, SnapshotDeletionPolicy dp) throws Exception {
// To backup an index we first take a snapshot:
try {
copyFiles(dir, (IndexCommit) dp.snapshot());
copyFiles(dir, dp.snapshot());
} finally {
// Make sure to release the snapshot, otherwise these
// files will never be deleted during this IndexWriter
@ -195,10 +194,8 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
// While we hold the snapshot, and no matter how long
// we take to do the backup, the IndexWriter will
// never delete the files in the snapshot:
Collection files = cp.getFileNames();
Iterator it = files.iterator();
while(it.hasNext()) {
final String fileName = (String) it.next();
Collection<String> files = cp.getFileNames();
for (final String fileName : files) {
// NOTE: in a real backup you would not use
// readFile; you would need to use something else
// that copies the file to a backup location. This


@ -17,7 +17,6 @@ package org.apache.lucene.analysis;
* limitations under the License.
*/
import java.util.Set;
import java.io.StringReader;
import java.io.IOException;


@ -1871,7 +1871,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
};
// Construct input text and expected output tokens
List expectedOutputTokens = new ArrayList();
List<String> expectedOutputTokens = new ArrayList<String>();
StringBuilder inputText = new StringBuilder();
for (int n = 0 ; n < foldings.length ; n += 2) {
if (n > 0) {
@ -1892,9 +1892,9 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
TokenStream stream = new WhitespaceTokenizer(new StringReader(inputText.toString()));
ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Iterator expectedIter = expectedOutputTokens.iterator();
Iterator<String> expectedIter = expectedOutputTokens.iterator();
while (expectedIter.hasNext()) {;
assertTermEquals((String)expectedIter.next(), filter, termAtt);
assertTermEquals(expectedIter.next(), filter, termAtt);
}
assertFalse(filter.incrementToken());
}


@ -18,7 +18,6 @@
package org.apache.lucene.analysis;
import java.io.StringReader;
import java.util.List;
public class TestMappingCharFilter extends BaseTokenStreamTestCase {


@ -1,13 +1,9 @@
package org.apache.lucene.analysis;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.Version;
import java.io.StringReader;
/**
* Copyright 2004 The Apache Software Foundation


@ -30,7 +30,7 @@ import java.util.HashSet;
public class TestStopAnalyzer extends BaseTokenStreamTestCase {
private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
private Set inValidTokens = new HashSet();
private Set<Object> inValidTokens = new HashSet<Object>();
public TestStopAnalyzer(String s) {
super(s);
@ -40,7 +40,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
protected void setUp() throws Exception {
super.setUp();
Iterator it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
Iterator<?> it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
while(it.hasNext()) {
inValidTokens.add(it.next());
}
@ -59,7 +59,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
}
public void testStopList() throws IOException {
Set stopWordsSet = new HashSet();
Set<Object> stopWordsSet = new HashSet<Object>();
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");
@ -78,7 +78,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
}
public void testStopListPositions() throws IOException {
Set stopWordsSet = new HashSet();
Set<Object> stopWordsSet = new HashSet<Object>();
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");


@ -37,7 +37,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
public void testExactCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
Set<String> stopWords = new HashSet(Arrays.asList("is", "the", "Time"));
Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopWords, false);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
@ -49,7 +49,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
public void testIgnoreCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
Set<String> stopWords = new HashSet(Arrays.asList( "is", "the", "Time" ));
Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopWords, true);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
@ -60,7 +60,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
public void testStopFilt() throws IOException {
StringReader reader = new StringReader("Now is The Time");
String[] stopWords = new String[] { "is", "the", "Time" };
Set stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
@ -75,16 +75,16 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
*/
public void testStopPositons() throws IOException {
StringBuilder sb = new StringBuilder();
ArrayList a = new ArrayList();
ArrayList<String> a = new ArrayList<String>();
for (int i=0; i<20; i++) {
String w = English.intToEnglish(i).trim();
sb.append(w).append(" ");
if (i%3 != 0) a.add(w);
}
log(sb.toString());
String stopWords[] = (String[]) a.toArray(new String[0]);
String stopWords[] = a.toArray(new String[0]);
for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
Set stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
// with increments
StringReader reader = new StringReader(sb.toString());
StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(reader), stopSet);
@ -94,8 +94,8 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet);
doTestStopPositons(stpf,false);
// with increments, concatenating two stop filters
ArrayList a0 = new ArrayList();
ArrayList a1 = new ArrayList();
ArrayList<String> a0 = new ArrayList<String>();
ArrayList<String> a1 = new ArrayList<String>();
for (int i=0; i<a.size(); i++) {
if (i%2==0) {
a0.add(a.get(i));
@ -103,12 +103,12 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
a1.add(a.get(i));
}
}
String stopWords0[] = (String[]) a0.toArray(new String[0]);
String stopWords0[] = a0.toArray(new String[0]);
for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
String stopWords1[] = (String[]) a1.toArray(new String[0]);
String stopWords1[] = a1.toArray(new String[0]);
for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
Set stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
Set stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
Set<Object> stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
Set<Object> stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
reader = new StringReader(sb.toString());
StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet0); // first part of the set
stpf0.setEnablePositionIncrements(true);


@ -26,8 +26,7 @@ import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
/**
* tests for the TestTeeSinkTokenFilter


@ -126,7 +126,7 @@ public class TestSimpleAttributeImpls extends LuceneTestCase {
}
public static final AttributeImpl assertCopyIsEqual(AttributeImpl att) throws Exception {
AttributeImpl copy = (AttributeImpl) att.getClass().newInstance();
AttributeImpl copy = att.getClass().newInstance();
att.copyTo(copy);
assertEquals("Copied instance must be equal", att, copy);
assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
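The cast dropped above is unnecessary because att.getClass() is typed as Class<? extends AttributeImpl>, so newInstance() already returns an AttributeImpl. A stand-alone sketch of the same idiom with a made-up Token class (ReflectiveCopySketch, Token, and emptyCopyOf are illustrative names, not Lucene types):

public class ReflectiveCopySketch {

  static class Token {
    public Token() {}  // newInstance() needs an accessible no-arg constructor
  }

  // getClass() on a Token reference returns Class<? extends Token>, so the
  // pre-Java-9 newInstance() call is already typed as Token; no cast needed.
  static Token emptyCopyOf(Token original) throws Exception {
    return original.getClass().newInstance();
  }

  public static void main(String[] args) throws Exception {
    Token copy = emptyCopyOf(new Token());
    System.out.println(copy.getClass().getName());
  }
}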


@ -9,7 +9,6 @@ import java.util.TimeZone;
import java.util.Locale;
import org.apache.lucene.util.LocalizedTestCase;
import org.apache.lucene.util.LuceneTestCase;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more


@ -29,7 +29,7 @@ public class TestNumberTools extends LuceneTestCase {
}
public void testMax() {
// make sure the constants convert to their equivelents
// make sure the constants convert to their equivalents
assertEquals(Long.MAX_VALUE, NumberTools
.stringToLong(NumberTools.MAX_STRING_VALUE));
assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools


@ -109,7 +109,7 @@ class DocHelper {
public static Map nameValues = null;
public static Map<String,Object> nameValues = null;
// ordered list of all the fields...
// could use LinkedHashMap for this purpose if Java1.4 is OK
@ -130,17 +130,16 @@ class DocHelper {
largeLazyField//placeholder for large field, since this is null. It must always be last
};
// Map<String fieldName, Fieldable field>
public static Map all=new HashMap();
public static Map indexed=new HashMap();
public static Map stored=new HashMap();
public static Map unstored=new HashMap();
public static Map unindexed=new HashMap();
public static Map termvector=new HashMap();
public static Map notermvector=new HashMap();
public static Map lazy= new HashMap();
public static Map noNorms=new HashMap();
public static Map noTf=new HashMap();
public static Map<String,Fieldable> all =new HashMap<String,Fieldable>();
public static Map<String,Fieldable> indexed =new HashMap<String,Fieldable>();
public static Map<String,Fieldable> stored =new HashMap<String,Fieldable>();
public static Map<String,Fieldable> unstored=new HashMap<String,Fieldable>();
public static Map<String,Fieldable> unindexed=new HashMap<String,Fieldable>();
public static Map<String,Fieldable> termvector=new HashMap<String,Fieldable>();
public static Map<String,Fieldable> notermvector=new HashMap<String,Fieldable>();
public static Map<String,Fieldable> lazy= new HashMap<String,Fieldable>();
public static Map<String,Fieldable> noNorms=new HashMap<String,Fieldable>();
public static Map<String,Fieldable> noTf=new HashMap<String,Fieldable>();
static {
//Initialize the large Lazy Field
@ -175,14 +174,14 @@ class DocHelper {
}
private static void add(Map map, Fieldable field) {
private static void add(Map<String,Fieldable> map, Fieldable field) {
map.put(field.name(), field);
}
static
{
nameValues = new HashMap();
nameValues = new HashMap<String,Object>();
nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
nameValues.put(TEXT_FIELD_3_KEY, FIELD_3_TEXT);


@ -20,8 +20,6 @@ import org.apache.lucene.util.*;
import org.apache.lucene.store.*;
import org.apache.lucene.document.*;
import org.apache.lucene.analysis.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
import java.util.Random;
import java.io.File;


@ -286,7 +286,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
for(int i=0;i<35;i++) {
if (!reader.isDeleted(i)) {
Document d = reader.document(i);
List fields = d.getFields();
List<Fieldable> fields = d.getFields();
if (!oldName.startsWith("19.") &&
!oldName.startsWith("20.") &&
!oldName.startsWith("21.") &&
@ -295,19 +295,19 @@ public class TestBackwardsCompatibility extends LuceneTestCase
if (d.getField("content3") == null) {
final int numFields = oldName.startsWith("29.") ? 7 : 5;
assertEquals(numFields, fields.size());
Field f = (Field) d.getField("id");
Field f = d.getField("id");
assertEquals(""+i, f.stringValue());
f = (Field) d.getField("utf8");
f = d.getField("utf8");
assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
f = (Field) d.getField("autf8");
f = d.getField("autf8");
assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
f = (Field) d.getField("content2");
f = d.getField("content2");
assertEquals("here is more content with aaa aaa aaa", f.stringValue());
f = (Field) d.getField("fie\u2C77ld");
f = d.getField("fie\u2C77ld");
assertEquals("field with non-ascii name", f.stringValue());
}
}


@ -21,7 +21,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestByteSlices extends LuceneTestCase {
private static class ByteBlockAllocator extends ByteBlockPool.Allocator {
ArrayList freeByteBlocks = new ArrayList();
ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
/* Allocate another byte[] from the shared pool */
@Override
@ -31,7 +31,7 @@ public class TestByteSlices extends LuceneTestCase {
if (0 == size)
b = new byte[DocumentsWriter.BYTE_BLOCK_SIZE];
else
b = (byte[]) freeByteBlocks.remove(size-1);
b = freeByteBlocks.remove(size-1);
return b;
}


@ -58,7 +58,7 @@ public class TestCheckIndex extends LuceneTestCase {
fail();
}
final CheckIndex.Status.SegmentInfoStatus seg = (CheckIndex.Status.SegmentInfoStatus) indexStatus.segmentInfos.get(0);
final CheckIndex.Status.SegmentInfoStatus seg = indexStatus.segmentInfos.get(0);
assertTrue(seg.openReaderPassed);
assertNotNull(seg.diagnostics);
@ -84,7 +84,7 @@ public class TestCheckIndex extends LuceneTestCase {
assertEquals(18, seg.termVectorStatus.totVectors);
assertTrue(seg.diagnostics.size() > 0);
final List onlySegments = new ArrayList();
final List<String> onlySegments = new ArrayList<String>();
onlySegments.add("_0");
assertTrue(checker.checkIndex(onlySegments).clean == true);


@ -19,7 +19,6 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.Collection;
@ -43,14 +42,14 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestDeletionPolicy extends LuceneTestCase
{
private void verifyCommitOrder(List commits) throws IOException {
final IndexCommit firstCommit = ((IndexCommit) commits.get(0));
private void verifyCommitOrder(List<? extends IndexCommit> commits) throws IOException {
final IndexCommit firstCommit = commits.get(0);
long last = SegmentInfos.generationFromSegmentsFileName(firstCommit.getSegmentsFileName());
assertEquals(last, firstCommit.getGeneration());
long lastVersion = firstCommit.getVersion();
long lastTimestamp = firstCommit.getTimestamp();
for(int i=1;i<commits.size();i++) {
final IndexCommit commit = ((IndexCommit) commits.get(i));
final IndexCommit commit = commits.get(i);
long now = SegmentInfos.generationFromSegmentsFileName(commit.getSegmentsFileName());
long nowVersion = commit.getVersion();
long nowTimestamp = commit.getTimestamp();
@ -68,12 +67,12 @@ public class TestDeletionPolicy extends LuceneTestCase
int numOnInit;
int numOnCommit;
Directory dir;
public void onInit(List commits) throws IOException {
public void onInit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
numOnInit++;
}
public void onCommit(List commits) throws IOException {
IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
IndexCommit lastCommit = commits.get(commits.size()-1);
IndexReader r = IndexReader.open(dir, true);
assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized());
r.close();
@ -89,18 +88,16 @@ public class TestDeletionPolicy extends LuceneTestCase
class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
int numOnInit;
int numOnCommit;
public void onInit(List commits) throws IOException {
public void onInit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
numOnInit++;
// On init, delete all commit points:
Iterator it = commits.iterator();
while(it.hasNext()) {
final IndexCommit commit = (IndexCommit) it.next();
for (final IndexCommit commit : commits) {
commit.delete();
assertTrue(commit.isDeleted());
}
}
public void onCommit(List commits) throws IOException {
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
int size = commits.size();
// Delete all but last one:
@ -116,25 +113,25 @@ public class TestDeletionPolicy extends LuceneTestCase
int numOnCommit;
int numToKeep;
int numDelete;
Set seen = new HashSet();
Set<String> seen = new HashSet<String>();
public KeepLastNDeletionPolicy(int numToKeep) {
this.numToKeep = numToKeep;
}
public void onInit(List commits) throws IOException {
public void onInit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
numOnInit++;
// do no deletions on init
doDeletes(commits, false);
}
public void onCommit(List commits) throws IOException {
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
doDeletes(commits, true);
}
private void doDeletes(List commits, boolean isCommit) {
private void doDeletes(List<? extends IndexCommit> commits, boolean isCommit) {
// Assert that we really are only called for each new
// commit:
@ -169,23 +166,21 @@ public class TestDeletionPolicy extends LuceneTestCase
this.expirationTimeSeconds = seconds;
}
public void onInit(List commits) throws IOException {
public void onInit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
onCommit(commits);
}
public void onCommit(List commits) throws IOException {
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
verifyCommitOrder(commits);
IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
IndexCommit lastCommit = commits.get(commits.size()-1);
// Any commit older than expireTime should be deleted:
double expireTime = dir.fileModified(lastCommit.getSegmentsFileName())/1000.0 - expirationTimeSeconds;
Iterator it = commits.iterator();
while(it.hasNext()) {
IndexCommit commit = (IndexCommit) it.next();
for (final IndexCommit commit : commits) {
double modTime = dir.fileModified(commit.getSegmentsFileName())/1000.0;
if (commit != lastCommit && modTime < expireTime) {
commit.delete();
@ -297,14 +292,12 @@ public class TestDeletionPolicy extends LuceneTestCase
assertEquals(2, policy.numOnCommit);
// Test listCommits
Collection commits = IndexReader.listCommits(dir);
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
// 1 from opening writer + 2 from closing writer
assertEquals(3, commits.size());
Iterator it = commits.iterator();
// Make sure we can open a reader on each commit:
while(it.hasNext()) {
IndexCommit commit = (IndexCommit) it.next();
for (final IndexCommit commit : commits) {
IndexReader r = IndexReader.open(commit, null, false);
r.close();
}
@ -356,12 +349,10 @@ public class TestDeletionPolicy extends LuceneTestCase
}
writer.close();
Collection commits = IndexReader.listCommits(dir);
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
assertEquals(6, commits.size());
IndexCommit lastCommit = null;
Iterator it = commits.iterator();
while(it.hasNext()) {
IndexCommit commit = (IndexCommit) it.next();
for (final IndexCommit commit : commits) {
if (lastCommit == null || commit.getGeneration() > lastCommit.getGeneration())
lastCommit = commit;
}
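The deletion-policy callbacks above now take List<? extends IndexCommit> instead of a raw List, so IndexWriter can hand over a list of any IndexCommit subclass while the policy reads elements without casting. A minimal policy in the same style, assuming the Lucene 3.x IndexDeletionPolicy and IndexCommit API exactly as shown in these hunks (the class name KeepOnlyLastCommitSketch is illustrative):

import java.util.List;

import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexDeletionPolicy;

public class KeepOnlyLastCommitSketch implements IndexDeletionPolicy {

  public void onInit(List<? extends IndexCommit> commits) {
    onCommit(commits);
  }

  public void onCommit(List<? extends IndexCommit> commits) {
    // As in the test above, the last element is the newest commit;
    // delete every older commit point.
    for (int i = 0; i < commits.size() - 1; i++) {
      commits.get(i).delete();
    }
  }
}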


@ -22,7 +22,7 @@ import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
@ -48,7 +48,7 @@ public class TestDoc extends LuceneTestCase {
private File workDir;
private File indexDir;
private LinkedList files;
private LinkedList<File> files;
/** Set the test case. This test case needs
@ -66,7 +66,7 @@ public class TestDoc extends LuceneTestCase {
Directory directory = FSDirectory.open(indexDir);
directory.close();
files = new LinkedList();
files = new LinkedList<File>();
files.add(createOutput("test.txt",
"This is the first test file"
));
@ -188,9 +188,9 @@ public class TestDoc extends LuceneTestCase {
merger.closeReaders();
if (useCompoundFile) {
List filesToDelete = merger.createCompoundFile(merged + ".cfs");
for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
si1.dir.deleteFile((String) iter.next());
List<String> filesToDelete = merger.createCompoundFile(merged + ".cfs");
for (final String fileToDelete : filesToDelete)
si1.dir.deleteFile(fileToDelete);
}
return new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, useCompoundFile, true);


@ -100,10 +100,10 @@ public class TestFieldsReader extends LuceneTestCase {
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
Set<String> loadFieldNames = new HashSet<String>();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
Set lazyFieldNames = new HashSet();
Set<String> lazyFieldNames = new HashSet<String>();
//new String[]{DocHelper.LARGE_LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_BINARY_KEY};
lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
@ -150,10 +150,10 @@ public class TestFieldsReader extends LuceneTestCase {
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
Set<String> loadFieldNames = new HashSet<String>();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
Set lazyFieldNames = new HashSet();
Set<String> lazyFieldNames = new HashSet<String>();
lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
@ -183,9 +183,10 @@ public class TestFieldsReader extends LuceneTestCase {
Document doc = reader.doc(0, fieldSelector);
assertTrue("doc is null and it shouldn't be", doc != null);
int count = 0;
List l = doc.getFields();
for (Iterator iter = l.iterator(); iter.hasNext();) {
Field field = (Field) iter.next();
List<Fieldable> l = doc.getFields();
for (final Fieldable fieldable : l ) {
Field field = (Field) fieldable;
assertTrue("field is null and it shouldn't be", field != null);
String sv = field.stringValue();
assertTrue("sv is null and it shouldn't be", sv != null);
@ -220,9 +221,9 @@ public class TestFieldsReader extends LuceneTestCase {
long lazyTime = 0;
long regularTime = 0;
int length = 50;
Set lazyFieldNames = new HashSet();
Set<String> lazyFieldNames = new HashSet<String>();
lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections.EMPTY_SET, lazyFieldNames);
SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections. <String> emptySet(), lazyFieldNames);
for (int i = 0; i < length; i++) {
reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);


@ -28,7 +28,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.io.*;
import java.util.*;
import java.util.zip.*;
/*
Verify we can read the pre-2.1 file format, do searches
@ -155,33 +154,34 @@ public class TestIndexFileDeleter extends LuceneTestCase
Arrays.sort(files);
Arrays.sort(files2);
Set dif = difFiles(files, files2);
Set<String> dif = difFiles(files, files2);
if (!Arrays.equals(files, files2)) {
fail("IndexFileDeleter failed to delete unreferenced extra files: should have deleted " + (filesPre.length-files.length) + " files but only deleted " + (filesPre.length - files2.length) + "; expected files:\n " + asString(files) + "\n actual files:\n " + asString(files2)+"\ndif: "+dif);
}
}
private static Set difFiles(String[] files1, String[] files2) {
Set set1 = new HashSet();
Set set2 = new HashSet();
Set extra = new HashSet();
private static Set<String> difFiles(String[] files1, String[] files2) {
Set<String> set1 = new HashSet<String>();
Set<String> set2 = new HashSet<String>();
Set<String> extra = new HashSet<String>();
for (int x=0; x < files1.length; x++) {
set1.add(files1[x]);
}
for (int x=0; x < files2.length; x++) {
set2.add(files2[x]);
}
Iterator i1 = set1.iterator();
Iterator<String> i1 = set1.iterator();
while (i1.hasNext()) {
Object o = i1.next();
String o = i1.next();
if (!set2.contains(o)) {
extra.add(o);
}
}
Iterator i2 = set2.iterator();
Iterator<String> i2 = set2.iterator();
while (i2.hasNext()) {
Object o = i2.next();
String o = i2.next();
if (!set1.contains(o)) {
extra.add(o);
}


@ -25,9 +25,11 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.Set;
import java.util.SortedSet;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@ -72,7 +74,7 @@ public class TestIndexReader extends LuceneTestCase
public void testCommitUserData() throws Exception {
RAMDirectory d = new MockRAMDirectory();
Map commitUserData = new HashMap();
Map<String,String> commitUserData = new HashMap<String,String>();
commitUserData.put("foo", "fighters");
// set up writer
@ -156,7 +158,7 @@ public class TestIndexReader extends LuceneTestCase
writer.close();
// set up reader
IndexReader reader = IndexReader.open(d, false);
Collection fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
assertTrue(fieldNames.contains("keyword"));
assertTrue(fieldNames.contains("text"));
assertTrue(fieldNames.contains("unindexed"));
@ -260,12 +262,12 @@ public class TestIndexReader extends LuceneTestCase
IndexReader reader = IndexReader.open(d, false);
FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
reader.getTermFreqVector(0, mapper);
Map map = mapper.getFieldToTerms();
Map<String,SortedSet<TermVectorEntry>> map = mapper.getFieldToTerms();
assertTrue("map is null and it shouldn't be", map != null);
assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
Set set = (Set) map.get("termvector");
for (Iterator iterator = set.iterator(); iterator.hasNext();) {
TermVectorEntry entry = (TermVectorEntry) iterator.next();
Set<TermVectorEntry> set = map.get("termvector");
for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
TermVectorEntry entry = iterator.next();
assertTrue("entry is null and it shouldn't be", entry != null);
System.out.println("Entry: " + entry);
}
@ -380,9 +382,9 @@ public class TestIndexReader extends LuceneTestCase
for (int i = 0; i < bin.length; i++) {
assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
}
Set lazyFields = new HashSet();
Set<String> lazyFields = new HashSet<String>();
lazyFields.add("bin1");
FieldSelector sel = new SetBasedFieldSelector(new HashSet(), lazyFields);
FieldSelector sel = new SetBasedFieldSelector(new HashSet<String>(), lazyFields);
doc = reader.document(reader.maxDoc() - 1, sel);
Fieldable[] fieldables = doc.getFieldables("bin1");
assertNotNull(fieldables);
@ -1340,19 +1342,19 @@ public class TestIndexReader extends LuceneTestCase
assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized());
// check field names
Collection fields1 = index1.getFieldNames(FieldOption.ALL);
Collection fields2 = index1.getFieldNames(FieldOption.ALL);
Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
Collection<String> fields2 = index1.getFieldNames(FieldOption.ALL);
assertEquals("IndexReaders have different numbers of fields.", fields1.size(), fields2.size());
Iterator it1 = fields1.iterator();
Iterator it2 = fields1.iterator();
Iterator<String> it1 = fields1.iterator();
Iterator<String> it2 = fields1.iterator();
while (it1.hasNext()) {
assertEquals("Different field names.", (String) it1.next(), (String) it2.next());
assertEquals("Different field names.", it1.next(), it2.next());
}
// check norms
it1 = fields1.iterator();
while (it1.hasNext()) {
String curField = (String) it1.next();
String curField = it1.next();
byte[] norms1 = index1.norms(curField);
byte[] norms2 = index2.norms(curField);
if (norms1 != null && norms2 != null)
@ -1378,14 +1380,14 @@ public class TestIndexReader extends LuceneTestCase
if (!index1.isDeleted(i)) {
Document doc1 = index1.document(i);
Document doc2 = index2.document(i);
fields1 = doc1.getFields();
fields2 = doc2.getFields();
assertEquals("Different numbers of fields for doc " + i + ".", fields1.size(), fields2.size());
it1 = fields1.iterator();
it2 = fields2.iterator();
while (it1.hasNext()) {
Field curField1 = (Field) it1.next();
Field curField2 = (Field) it2.next();
List<Fieldable> fieldable1 = doc1.getFields();
List<Fieldable> fieldable2 = doc2.getFields();
assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size());
Iterator<Fieldable> itField1 = fieldable1.iterator();
Iterator<Fieldable> itField2 = fieldable2.iterator();
while (itField1.hasNext()) {
Field curField1 = (Field) itField1.next();
Field curField2 = (Field) itField2.next();
assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name());
assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
}
@ -1587,15 +1589,11 @@ public class TestIndexReader extends LuceneTestCase
writer.addDocument(createDocument("a"));
writer.close();
Collection commits = IndexReader.listCommits(dir);
Iterator it = commits.iterator();
while(it.hasNext()) {
IndexCommit commit = (IndexCommit) it.next();
Collection files = commit.getFileNames();
HashSet seen = new HashSet();
Iterator it2 = files.iterator();
while(it2.hasNext()) {
String fileName = (String) it2.next();
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
for (final IndexCommit commit : commits) {
Collection<String> files = commit.getFileNames();
HashSet<String> seen = new HashSet<String>();
for (final String fileName : files) {
assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName));
seen.add(fileName);
}


@ -55,9 +55,9 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
private int numDocNorms;
private ArrayList norms;
private ArrayList<Float> norms;
private ArrayList modifiedNorms;
private ArrayList<Float> modifiedNorms;
private float lastNorm = 0;
@ -91,19 +91,19 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
Directory dir1 = FSDirectory.open(indexDir1);
IndexWriter.unlock(dir1);
norms = new ArrayList();
modifiedNorms = new ArrayList();
norms = new ArrayList<Float>();
modifiedNorms = new ArrayList<Float>();
createIndex(dir1);
doTestNorms(dir1);
// test with a single index: index2
ArrayList norms1 = norms;
ArrayList modifiedNorms1 = modifiedNorms;
ArrayList<Float> norms1 = norms;
ArrayList<Float> modifiedNorms1 = modifiedNorms;
int numDocNorms1 = numDocNorms;
norms = new ArrayList();
modifiedNorms = new ArrayList();
norms = new ArrayList<Float>();
modifiedNorms = new ArrayList<Float>();
numDocNorms = 0;
File indexDir2 = new File(tempDir, "lucenetestindex2");
@ -282,10 +282,10 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
String field = "f" + i;
byte b[] = ir.norms(field);
assertEquals("number of norms mismatches", numDocNorms, b.length);
ArrayList storedNorms = (i == 1 ? modifiedNorms : norms);
ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
for (int j = 0; j < b.length; j++) {
float norm = Similarity.getDefault().decodeNormValue(b[j]);
float norm1 = ((Float) storedNorms.get(j)).floatValue();
float norm1 = storedNorms.get(j).floatValue();
assertEquals("stored norm value of " + field + " for doc " + j + " is "
+ norm + " - a mismatch!", norm, norm1, 0.000001);
}


@ -20,9 +20,10 @@ package org.apache.lucene.index;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.Map;
@ -732,13 +733,13 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
};
final List readers = Collections.synchronizedList(new ArrayList());
final List<ReaderCouple> readers = Collections.synchronizedList(new ArrayList<ReaderCouple>());
IndexReader firstReader = IndexReader.open(dir, false);
IndexReader reader = firstReader;
final Random rnd = newRandom();
ReaderThread[] threads = new ReaderThread[n];
final Set readersToClose = Collections.synchronizedSet(new HashSet());
final Set<IndexReader> readersToClose = Collections.synchronizedSet(new HashSet<IndexReader>());
for (int i = 0; i < n; i++) {
if (i % 2 == 0) {
@ -806,7 +807,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
while (!stopped) {
int numReaders = readers.size();
if (numReaders > 0) {
ReaderCouple c = (ReaderCouple) readers.get(rnd.nextInt(numReaders));
ReaderCouple c = readers.get(rnd.nextInt(numReaders));
TestIndexReader.assertIndexEquals(c.newReader, c.refreshedReader);
}
@ -845,17 +846,15 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
Iterator it = readersToClose.iterator();
while (it.hasNext()) {
((IndexReader) it.next()).close();
for (final IndexReader readerToClose : readersToClose) {
readerToClose.close();
}
firstReader.close();
reader.close();
it = readersToClose.iterator();
while (it.hasNext()) {
assertReaderClosed((IndexReader) it.next(), true, true);
for (final IndexReader readerToClose : readersToClose) {
assertReaderClosed(readerToClose, true, true);
}
assertReaderClosed(reader, true, true);
@ -1185,9 +1184,9 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
private static class KeepAllCommits implements IndexDeletionPolicy {
public void onInit(List commits) {
public void onInit(List<? extends IndexCommit> commits) {
}
public void onCommit(List commits) {
public void onCommit(List<? extends IndexCommit> commits) {
}
}
@ -1198,13 +1197,13 @@ public class TestIndexReaderReopen extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
Map data = new HashMap();
Map<String,String> data = new HashMap<String,String>();
data.put("index", i+"");
writer.commit(data);
}
for(int i=0;i<4;i++) {
writer.deleteDocuments(new Term("id", ""+i));
Map data = new HashMap();
Map<String,String> data = new HashMap<String,String>();
data.put("index", (4+i)+"");
writer.commit(data);
}
@ -1214,9 +1213,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
assertEquals(0, r.numDocs());
assertEquals(4, r.maxDoc());
Iterator it = IndexReader.listCommits(dir).iterator();
while(it.hasNext()) {
IndexCommit commit = (IndexCommit) it.next();
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
for (final IndexCommit commit : commits) {
IndexReader r2 = r.reopen(commit);
assertTrue(r2 != r);
@ -1228,13 +1226,13 @@ public class TestIndexReaderReopen extends LuceneTestCase {
// expected
}
final Map s = commit.getUserData();
final Map<String,String> s = commit.getUserData();
final int v;
if (s.size() == 0) {
// First commit created by IW
v = -1;
} else {
v = Integer.parseInt((String) s.get("index"));
v = Integer.parseInt(s.get("index"));
}
if (v < 4) {
assertEquals(1+v, r2.numDocs());


@ -48,6 +48,7 @@ import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
@ -2131,7 +2132,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer.setMergeFactor(2);
final IndexWriter finalWriter = writer;
final ArrayList failure = new ArrayList();
final ArrayList<Throwable> failure = new ArrayList<Throwable>();
Thread t1 = new Thread() {
@Override
public void run() {
@ -2160,7 +2161,7 @@ public class TestIndexWriter extends LuceneTestCase {
};
if (failure.size() > 0)
throw (Throwable) failure.get(0);
throw failure.get(0);
t1.start();
@ -3475,14 +3476,14 @@ public class TestIndexWriter extends LuceneTestCase {
final TermAttribute termAtt = addAttribute(TermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
final Iterator tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
final Iterator<String> tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
boolean first = true;
@Override
public boolean incrementToken() {
if (!tokens.hasNext()) return false;
clearAttributes();
termAtt.setTermBuffer((String) tokens.next());
termAtt.setTermBuffer( tokens.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
@ -3643,7 +3644,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir, dir2;
final static int NUM_INIT_DOCS = 17;
IndexWriter writer2;
final List failures = new ArrayList();
final List<Throwable> failures = new ArrayList<Throwable>();
volatile boolean didClose;
final IndexReader[] readers;
final int NUM_COPY;
@ -3992,7 +3993,7 @@ public class TestIndexWriter extends LuceneTestCase {
w.setMaxBufferedDocs(2);
for(int j=0;j<17;j++)
addDoc(w);
Map data = new HashMap();
Map<String,String> data = new HashMap<String,String>();
data.put("label", "test1");
w.commit(data);
w.close();
@ -4040,7 +4041,7 @@ public class TestIndexWriter extends LuceneTestCase {
// LUCENE-1429
public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
final List thrown = new ArrayList();
final List<Throwable> thrown = new ArrayList<Throwable>();
final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) {
@Override
@ -4562,7 +4563,7 @@ public class TestIndexWriter extends LuceneTestCase {
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
Iterator it = doc.getFields().iterator();
Iterator<Fieldable> it = doc.getFields().iterator();
assertTrue(it.hasNext());
Field f = (Field) it.next();
assertEquals(f.name(), "zzz");


@ -109,7 +109,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
}
}
ThreadLocal doFail = new ThreadLocal();
ThreadLocal<Thread> doFail = new ThreadLocal<Thread>();
public class MockIndexWriter extends IndexWriter {
Random r = new java.util.Random(17);


@ -306,10 +306,10 @@ public class TestIndexWriterReader extends LuceneTestCase {
final static int NUM_THREADS = 5;
final Thread[] threads = new Thread[NUM_THREADS];
IndexWriter mainWriter;
List deletedTerms = new ArrayList();
LinkedList toDeleteTerms = new LinkedList();
List<Term> deletedTerms = new ArrayList<Term>();
LinkedList<Term> toDeleteTerms = new LinkedList<Term>();
Random random;
final List failures = new ArrayList();
final List<Throwable> failures = new ArrayList<Throwable>();
public DeleteThreads(IndexWriter mainWriter) throws IOException {
this.mainWriter = mainWriter;
@ -326,7 +326,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
Term getDeleteTerm() {
synchronized (toDeleteTerms) {
return (Term)toDeleteTerms.removeFirst();
return toDeleteTerms.removeFirst();
}
}
@ -373,7 +373,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
int numDirs;
final Thread[] threads = new Thread[NUM_THREADS];
IndexWriter mainWriter;
final List failures = new ArrayList();
final List<Throwable> failures = new ArrayList<Throwable>();
IndexReader[] readers;
boolean didClose = false;
HeavyAtomicInt count = new HeavyAtomicInt(0);
@ -723,7 +723,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
final float SECONDS = 0.5f;
final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
final List excs = Collections.synchronizedList(new ArrayList());
final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
final Thread[] threads = new Thread[NUM_THREAD];
for(int i=0;i<NUM_THREAD;i++) {
@ -787,7 +787,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
final float SECONDS = 0.5f;
final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
final List excs = Collections.synchronizedList(new ArrayList());
final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
final Thread[] threads = new Thread[NUM_THREAD];
for(int i=0;i<NUM_THREAD;i++) {


@ -46,7 +46,7 @@ public class TestLazyBug extends LuceneTestCase {
"this string is a bigger string, mary had a little lamb, little lamb, little lamb!"
};
private static Set dataset = new HashSet(Arrays.asList(data));
private static Set<String> dataset = new HashSet<String>(Arrays.asList(data));
private static String MAGIC_FIELD = "f"+(NUM_FIELDS/3);
@ -93,11 +93,11 @@ public class TestLazyBug extends LuceneTestCase {
Document d = reader.document(docs[i], SELECTOR);
d.get(MAGIC_FIELD);
List fields = d.getFields();
for (Iterator fi = fields.iterator(); fi.hasNext(); ) {
List<Fieldable> fields = d.getFields();
for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
Fieldable f=null;
try {
f = (Fieldable) fi.next();
f = fi.next();
String fname = f.name();
String fval = f.stringValue();
assertNotNull(docs[i]+" FIELD: "+fname, fval);


@ -52,8 +52,8 @@ public class TestNorms extends LuceneTestCase {
private Similarity similarityOne;
private Analyzer anlzr;
private int numDocNorms;
private ArrayList norms;
private ArrayList modifiedNorms;
private ArrayList<Float> norms;
private ArrayList<Float> modifiedNorms;
private float lastNorm = 0;
private float normDelta = (float) 0.001;
@ -85,19 +85,19 @@ public class TestNorms extends LuceneTestCase {
File indexDir1 = new File(tempDir, "lucenetestindex1");
Directory dir1 = FSDirectory.open(indexDir1);
norms = new ArrayList();
modifiedNorms = new ArrayList();
norms = new ArrayList<Float>();
modifiedNorms = new ArrayList<Float>();
createIndex(dir1);
doTestNorms(dir1);
// test with a single index: index2
ArrayList norms1 = norms;
ArrayList modifiedNorms1 = modifiedNorms;
ArrayList<Float> norms1 = norms;
ArrayList<Float> modifiedNorms1 = modifiedNorms;
int numDocNorms1 = numDocNorms;
norms = new ArrayList();
modifiedNorms = new ArrayList();
norms = new ArrayList<Float>();
modifiedNorms = new ArrayList<Float>();
numDocNorms = 0;
File indexDir2 = new File(tempDir, "lucenetestindex2");
@ -187,10 +187,10 @@ public class TestNorms extends LuceneTestCase {
String field = "f"+i;
byte b[] = ir.norms(field);
assertEquals("number of norms mismatches",numDocNorms,b.length);
ArrayList storedNorms = (i==1 ? modifiedNorms : norms);
ArrayList<Float> storedNorms = (i==1 ? modifiedNorms : norms);
for (int j = 0; j < b.length; j++) {
float norm = similarityOne.decodeNormValue(b[j]);
float norm1 = ((Float)storedNorms.get(j)).floatValue();
float norm1 = storedNorms.get(j).floatValue();
assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
}
}


@ -71,7 +71,7 @@ public class TestParallelReader extends LuceneTestCase {
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
Collection fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
Collection<String> fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
assertEquals(4, fieldNames.size());
assertTrue(fieldNames.contains("f1"));
assertTrue(fieldNames.contains("f2"));


@ -382,7 +382,7 @@ public class TestPayloads extends LuceneTestCase {
* This Analyzer uses an WhitespaceTokenizer and PayloadFilter.
*/
private static class PayloadAnalyzer extends Analyzer {
Map fieldToData = new HashMap();
Map<String,PayloadData> fieldToData = new HashMap<String,PayloadData>();
void setPayloadData(String field, byte[] data, int offset, int length) {
fieldToData.put(field, new PayloadData(0, data, offset, length));
@ -394,7 +394,7 @@ public class TestPayloads extends LuceneTestCase {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
PayloadData payload = (PayloadData) fieldToData.get(fieldName);
PayloadData payload = fieldToData.get(fieldName);
TokenStream ts = new WhitespaceTokenizer(reader);
if (payload != null) {
if (payload.numFieldInstancesToSkip == 0) {
@ -550,10 +550,10 @@ public class TestPayloads extends LuceneTestCase {
}
private static class ByteArrayPool {
private List pool;
private List<byte[]> pool;
ByteArrayPool(int capacity, int size) {
pool = new ArrayList();
pool = new ArrayList<byte[]>();
for (int i = 0; i < capacity; i++) {
pool.add(new byte[size]);
}
@ -572,7 +572,7 @@ public class TestPayloads extends LuceneTestCase {
}
synchronized byte[] get() {
return (byte[]) pool.remove(0);
return pool.remove(0);
}
synchronized void release(byte[] b) {


@ -19,7 +19,6 @@ import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.util.BitSet;
import java.util.Iterator;
import java.util.Map;
public class TestPositionBasedTermVectorMapper extends LuceneTestCase {
@ -69,19 +68,19 @@ public class TestPositionBasedTermVectorMapper extends LuceneTestCase {
mapper.map(token, 1, null, thePositions[i]);
}
Map map = mapper.getFieldToTerms();
Map<String,Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo>> map = mapper.getFieldToTerms();
assertTrue("map is null and it shouldn't be", map != null);
assertTrue("map Size: " + map.size() + " is not: " + 1, map.size() == 1);
Map positions = (Map) map.get("test");
Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo> positions = map.get("test");
assertTrue("thePositions is null and it shouldn't be", positions != null);
assertTrue("thePositions Size: " + positions.size() + " is not: " + numPositions, positions.size() == numPositions);
BitSet bits = new BitSet(numPositions);
for (Iterator iterator = positions.entrySet().iterator(); iterator.hasNext();) {
Map.Entry entry = (Map.Entry) iterator.next();
PositionBasedTermVectorMapper.TVPositionInfo info = (PositionBasedTermVectorMapper.TVPositionInfo) entry.getValue();
for (Map.Entry<Integer,PositionBasedTermVectorMapper.TVPositionInfo> entry : positions.entrySet()) {
PositionBasedTermVectorMapper.TVPositionInfo info = entry.getValue();
assertTrue("info is null and it shouldn't be", info != null);
int pos = ((Integer) entry.getKey()).intValue();
int pos = entry.getKey().intValue();
bits.set(pos);
assertTrue(info.getPosition() + " does not equal: " + pos, info.getPosition() == pos);
assertTrue("info.getOffsets() is null and it shouldn't be", info.getOffsets() != null);


@ -85,7 +85,7 @@ public class TestSegmentMerger extends LuceneTestCase {
assertTrue(termDocs != null);
assertTrue(termDocs.next() == true);
Collection stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
Collection<String> stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
assertTrue(stored != null);
//System.out.println("stored size: " + stored.size());
assertTrue("We do not have 3 fields that were indexed with term vector",stored.size() == 3);


@ -62,9 +62,8 @@ public class TestSegmentReader extends LuceneTestCase {
//There are 2 unstored fields on the document that are not preserved across writing
assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
List fields = result.getFields();
for (Iterator iter = fields.iterator(); iter.hasNext();) {
Fieldable field = (Fieldable) iter.next();
List<Fieldable> fields = result.getFields();
for (final Fieldable field : fields ) {
assertTrue(field != null);
assertTrue(DocHelper.nameValues.containsKey(field.name()));
}
@ -84,19 +83,19 @@ public class TestSegmentReader extends LuceneTestCase {
}
public void testGetFieldNameVariations() {
Collection result = reader.getFieldNames(IndexReader.FieldOption.ALL);
Collection<String> result = reader.getFieldNames(IndexReader.FieldOption.ALL);
assertTrue(result != null);
assertTrue(result.size() == DocHelper.all.size());
for (Iterator iter = result.iterator(); iter.hasNext();) {
String s = (String) iter.next();
for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
String s = iter.next();
//System.out.println("Name: " + s);
assertTrue(DocHelper.nameValues.containsKey(s) == true || s.equals(""));
}
result = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
assertTrue(result != null);
assertTrue(result.size() == DocHelper.indexed.size());
for (Iterator iter = result.iterator(); iter.hasNext();) {
String s = (String) iter.next();
for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
String s = iter.next();
assertTrue(DocHelper.indexed.containsKey(s) == true || s.equals(""));
}


@ -21,7 +21,6 @@ import org.apache.lucene.store.*;
import org.apache.lucene.document.*;
import org.apache.lucene.analysis.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
import java.util.Random;
import java.io.File;
@ -155,7 +154,7 @@ public class TestStressIndexing extends LuceneTestCase {
modifier.close();
for(int i=0;i<numThread;i++)
assertTrue(!((TimedThread) threads[i]).failed);
assertTrue(! threads[i].failed);
//System.out.println(" Writer: " + indexerThread.count + " iterations");
//System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");


@ -73,7 +73,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
// dir1 = FSDirectory.open("foofoofoo");
Directory dir2 = new MockRAMDirectory();
// mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
Map docs = indexRandom(10, 10, 100, dir1);
Map<String,Document> docs = indexRandom(10, 10, 100, dir1);
indexSerial(docs, dir2);
// verifying verify
@ -97,7 +97,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
int range=r.nextInt(20)+1;
Directory dir1 = new MockRAMDirectory();
Directory dir2 = new MockRAMDirectory();
Map docs = indexRandom(nThreads, iter, range, dir1);
Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1);
indexSerial(docs, dir2);
verifyEquals(dir1, dir2, "id");
}
@ -106,9 +106,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
static Term idTerm = new Term("id","");
IndexingThread[] threads;
static Comparator fieldNameComparator = new Comparator() {
public int compare(Object o1, Object o2) {
return ((Fieldable)o1).name().compareTo(((Fieldable)o2).name());
static Comparator<Fieldable> fieldNameComparator = new Comparator<Fieldable>() {
public int compare(Fieldable o1, Fieldable o2) {
return o1.name().compareTo(o2.name());
}
};
@ -117,12 +117,12 @@ public class TestStressIndexing2 extends LuceneTestCase {
// everything.
public static class DocsAndWriter {
Map docs;
Map<String,Document> docs;
IndexWriter writer;
}
public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
Map docs = new HashMap();
Map<String,Document> docs = new HashMap<String,Document>();
IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setUseCompoundFile(false);
@ -172,8 +172,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
return dw;
}
public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
Map docs = new HashMap();
public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
Map<String,Document> docs = new HashMap<String,Document>();
for(int iter=0;iter<3;iter++) {
IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setUseCompoundFile(false);
@ -217,14 +217,14 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
public static void indexSerial(Map docs, Directory dir) throws IOException {
public static void indexSerial(Map<String,Document> docs, Directory dir) throws IOException {
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
// index all docs in a single thread
Iterator iter = docs.values().iterator();
Iterator<Document> iter = docs.values().iterator();
while (iter.hasNext()) {
Document d = (Document)iter.next();
ArrayList fields = new ArrayList();
Document d = iter.next();
ArrayList<Fieldable> fields = new ArrayList<Fieldable>();
fields.addAll(d.getFields());
// put fields in same order each time
Collections.sort(fields, fieldNameComparator);
@ -232,7 +232,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
Document d1 = new Document();
d1.setBoost(d.getBoost());
for (int i=0; i<fields.size(); i++) {
d1.add((Fieldable) fields.get(i));
d1.add(fields.get(i));
}
w.addDocument(d1);
// System.out.println("indexing "+d1);
@ -391,8 +391,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
public static void verifyEquals(Document d1, Document d2) {
List ff1 = d1.getFields();
List ff2 = d2.getFields();
List<Fieldable> ff1 = d1.getFields();
List<Fieldable> ff2 = d2.getFields();
Collections.sort(ff1, fieldNameComparator);
Collections.sort(ff2, fieldNameComparator);
@ -405,8 +405,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
for (int i=0; i<ff1.size(); i++) {
Fieldable f1 = (Fieldable)ff1.get(i);
Fieldable f2 = (Fieldable)ff2.get(i);
Fieldable f1 = ff1.get(i);
Fieldable f2 = ff2.get(i);
if (f1.isBinary()) {
assert(f2.isBinary());
//TODO
@ -480,7 +480,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
int base;
int range;
int iterations;
Map docs = new HashMap(); // Map<String,Document>
Map<String,Document> docs = new HashMap<String,Document>();
Random r;
public int nextInt(int lim) {
@ -561,7 +561,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
public void indexDoc() throws IOException {
Document d = new Document();
ArrayList fields = new ArrayList();
ArrayList<Field> fields = new ArrayList<Field>();
String idString = getIdString();
Field idField = new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
fields.add(idField);
@ -609,7 +609,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
for (int i=0; i<fields.size(); i++) {
d.add((Fieldable) fields.get(i));
d.add(fields.get(i));
}
w.updateDocument(idTerm.createTerm(idString), d);
// System.out.println("indexing "+d);
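For reference, a minimal standalone sketch of the pattern the TestStressIndexing2 hunks above apply: raw Map and ArrayList declarations become parameterized types, the field-name Comparator is typed, and Iterator-plus-cast loops become enhanced for loops. The Doc class below is a hypothetical stand-in for Document/Fieldable, not code from this commit.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GenericCollectionsSketch {
  // hypothetical stand-in for Document/Fieldable
  static class Doc {
    final String name;
    Doc(String name) { this.name = name; }
  }

  // typed Comparator: compare() receives Doc directly, no casts needed
  static final Comparator<Doc> BY_NAME = new Comparator<Doc>() {
    public int compare(Doc a, Doc b) {
      return a.name.compareTo(b.name);
    }
  };

  public static void main(String[] args) {
    Map<String, Doc> docs = new HashMap<String, Doc>();    // was: Map docs = new HashMap();
    docs.put("2", new Doc("two"));
    docs.put("1", new Doc("one"));

    List<Doc> fields = new ArrayList<Doc>(docs.values());  // was: ArrayList fields = new ArrayList();
    Collections.sort(fields, BY_NAME);

    for (Doc d : fields) {                                  // was: Iterator iter = ...; (Doc) iter.next()
      System.out.println(d.name);
    }
  }
}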

View File

@ -25,7 +25,6 @@ import java.util.Map;
import java.util.SortedSet;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@ -258,13 +257,13 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertTrue(reader != null);
SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
reader.get(0, mapper);
SortedSet set = mapper.getTermVectorEntrySet();
SortedSet<TermVectorEntry> set = mapper.getTermVectorEntrySet();
assertTrue("set is null and it shouldn't be", set != null);
//three fields, 4 terms, all terms are the same
assertTrue("set Size: " + set.size() + " is not: " + 4, set.size() == 4);
//Check offsets and positions
for (Iterator iterator = set.iterator(); iterator.hasNext();) {
TermVectorEntry tve = (TermVectorEntry) iterator.next();
for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
TermVectorEntry tve = iterator.next();
assertTrue("tve is null and it shouldn't be", tve != null);
assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
@ -278,8 +277,8 @@ public class TestTermVectorsReader extends LuceneTestCase {
//three fields, 4 terms, all terms are the same
assertTrue("set Size: " + set.size() + " is not: " + 4, set.size() == 4);
//Should have offsets and positions b/c we are munging all the fields together
for (Iterator iterator = set.iterator(); iterator.hasNext();) {
TermVectorEntry tve = (TermVectorEntry) iterator.next();
for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
TermVectorEntry tve = iterator.next();
assertTrue("tve is null and it shouldn't be", tve != null);
assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
@ -289,14 +288,12 @@ public class TestTermVectorsReader extends LuceneTestCase {
FieldSortedTermVectorMapper fsMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
reader.get(0, fsMapper);
Map map = fsMapper.getFieldToTerms();
Map<String,SortedSet<TermVectorEntry>> map = fsMapper.getFieldToTerms();
assertTrue("map Size: " + map.size() + " is not: " + testFields.length, map.size() == testFields.length);
for (Iterator iterator = map.entrySet().iterator(); iterator.hasNext();) {
Map.Entry entry = (Map.Entry) iterator.next();
SortedSet sortedSet = (SortedSet) entry.getValue();
for (Map.Entry<String,SortedSet<TermVectorEntry>> entry : map.entrySet()) {
SortedSet<TermVectorEntry> sortedSet = entry.getValue();
assertTrue("sortedSet Size: " + sortedSet.size() + " is not: " + 4, sortedSet.size() == 4);
for (Iterator inner = sortedSet.iterator(); inner.hasNext();) {
TermVectorEntry tve = (TermVectorEntry) inner.next();
for (final TermVectorEntry tve : sortedSet) {
assertTrue("tve is null and it shouldn't be", tve != null);
//Check offsets and positions.
assertTrue("tve is null and it shouldn't be", tve != null);
@ -320,12 +317,10 @@ public class TestTermVectorsReader extends LuceneTestCase {
reader.get(0, fsMapper);
map = fsMapper.getFieldToTerms();
assertTrue("map Size: " + map.size() + " is not: " + testFields.length, map.size() == testFields.length);
for (Iterator iterator = map.entrySet().iterator(); iterator.hasNext();) {
Map.Entry entry = (Map.Entry) iterator.next();
SortedSet sortedSet = (SortedSet) entry.getValue();
for (final Map.Entry<String,SortedSet<TermVectorEntry>> entry : map.entrySet()) {
SortedSet<TermVectorEntry> sortedSet = entry.getValue();
assertTrue("sortedSet Size: " + sortedSet.size() + " is not: " + 4, sortedSet.size() == 4);
for (Iterator inner = sortedSet.iterator(); inner.hasNext();) {
TermVectorEntry tve = (TermVectorEntry) inner.next();
for (final TermVectorEntry tve : sortedSet) {
assertTrue("tve is null and it shouldn't be", tve != null);
//Check offsets and positions.
assertTrue("tve is null and it shouldn't be", tve != null);

View File

@ -54,12 +54,12 @@ public class TestTransactionRollback extends LuceneTestCase {
// System.out.println("Attempting to rollback to "+id);
String ids="-"+id;
IndexCommit last=null;
Collection commits = IndexReader.listCommits(dir);
for (Iterator iterator = commits.iterator(); iterator.hasNext();) {
IndexCommit commit = (IndexCommit) iterator.next();
Map ud=commit.getUserData();
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
for (Iterator<IndexCommit> iterator = commits.iterator(); iterator.hasNext();) {
IndexCommit commit = iterator.next();
Map<String,String> ud=commit.getUserData();
if (ud.size() > 0)
if (((String) ud.get("index")).endsWith(ids))
if (ud.get("index").endsWith(ids))
last=commit;
}
@ -68,7 +68,7 @@ public class TestTransactionRollback extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(),
new RollbackDeletionPolicy(id), MaxFieldLength.UNLIMITED, last);
Map data = new HashMap();
Map<String,String> data = new HashMap<String,String>();
data.put("index", "Rolled back to 1-"+id);
w.commit(data);
w.close();
@ -135,7 +135,7 @@ public class TestTransactionRollback extends LuceneTestCase {
w.addDocument(doc);
if (currentRecordId%10 == 0) {
Map data = new HashMap();
Map<String,String> data = new HashMap<String,String>();
data.put("index", "records 1-"+currentRecordId);
w.commit(data);
}
@ -152,18 +152,17 @@ public class TestTransactionRollback extends LuceneTestCase {
this.rollbackPoint = rollbackPoint;
}
public void onCommit(List commits) throws IOException {
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
}
public void onInit(List commits) throws IOException {
for (Iterator iterator = commits.iterator(); iterator.hasNext();) {
IndexCommit commit = (IndexCommit) iterator.next();
Map userData=commit.getUserData();
public void onInit(List<? extends IndexCommit> commits) throws IOException {
for (final IndexCommit commit : commits) {
Map<String,String> userData=commit.getUserData();
if (userData.size() > 0) {
// Label for a commit point is "Records 1-30"
// This code reads the last id ("30" in this example) and deletes it
// if it is after the desired rollback point
String x = (String) userData.get("index");
String x = userData.get("index");
String lastVal = x.substring(x.lastIndexOf("-")+1);
int last = Integer.parseInt(lastVal);
if (last>rollbackPoint) {
@ -186,10 +185,10 @@ public class TestTransactionRollback extends LuceneTestCase {
class DeleteLastCommitPolicy implements IndexDeletionPolicy {
public void onCommit(List commits) throws IOException {}
public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
public void onInit(List commits) throws IOException {
((IndexCommit) commits.get(commits.size()-1)).delete();
public void onInit(List<? extends IndexCommit> commits) throws IOException {
commits.get(commits.size()-1).delete();
}
}
@ -208,7 +207,7 @@ public class TestTransactionRollback extends LuceneTestCase {
// Keeps all commit points (used to build index)
class KeepAllDeletionPolicy implements IndexDeletionPolicy {
public void onCommit(List commits) throws IOException {}
public void onInit(List commits) throws IOException {}
public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
public void onInit(List<? extends IndexCommit> commits) throws IOException {}
}
}
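The deletion-policy signatures above change from raw List to List<? extends IndexCommit>. A minimal sketch, with a hypothetical Commit interface standing in for IndexCommit, of how the bounded wildcard lets a policy accept a list of any commit subtype while still reading elements without casts:

import java.util.Arrays;
import java.util.List;

public class WildcardSketch {
  // hypothetical stand-ins for IndexCommit / IndexDeletionPolicy
  interface Commit { String userData(); }

  interface DeletionPolicy {
    // accepts List<Commit>, List<SomeCommitImpl>, etc.
    void onInit(List<? extends Commit> commits);
  }

  static class KeepLastOnly implements DeletionPolicy {
    public void onInit(List<? extends Commit> commits) {
      Commit last = commits.get(commits.size() - 1);   // typed element, no cast
      System.out.println("keeping commit: " + last.userData());
    }
  }

  public static void main(String[] args) {
    Commit c = new Commit() { public String userData() { return "records 1-10"; } };
    new KeepLastOnly().onInit(Arrays.asList(c, c));
  }
}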

View File

@ -217,6 +217,6 @@ public class TestTransactions extends LuceneTestCase
threads[i].join();
for(int i=0;i<numThread;i++)
assertTrue(!((TimedThread) threads[i]).failed);
assertTrue(!threads[i].failed);
}
}

View File

@ -30,22 +30,22 @@ public class TestWordlistLoader extends LuceneTestCase {
public void testWordlistLoading() throws IOException {
String s = "ONE\n two \nthree";
HashSet wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
HashSet<String> wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
checkSet(wordSet1);
HashSet wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
HashSet<String> wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
checkSet(wordSet2);
}
public void testComments() throws Exception {
String s = "ONE\n two \nthree\n#comment";
HashSet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
HashSet<String> wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
checkSet(wordSet1);
assertFalse(wordSet1.contains("#comment"));
assertFalse(wordSet1.contains("comment"));
}
private void checkSet(HashSet wordset) {
private void checkSet(HashSet<String> wordset) {
assertEquals(3, wordset.size());
assertTrue(wordset.contains("ONE")); // case is not modified
assertTrue(wordset.contains("two")); // surrounding whitespace is removed

View File

@ -17,7 +17,6 @@ package org.apache.lucene.queryParser;
* limitations under the License.
*/
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;

View File

@ -130,7 +130,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
}
public void testBoostsSimple() throws Exception {
Map boosts = new HashMap();
Map<String,Float> boosts = new HashMap<String,Float>();
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = {"b", "t"};
@ -218,7 +218,6 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
String[] fields = {"b", "t"};
//int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());

View File

@ -72,7 +72,7 @@ import org.apache.lucene.util.Version;
public class TestQueryParser extends LocalizedTestCase {
public TestQueryParser(String name) {
super(name, new HashSet(Arrays.asList(
super(name, new HashSet<String>(Arrays.asList(
"testLegacyDateRange", "testDateRange",
"testCJK", "testNumber", "testFarsiRangeCollating",
"testLocalDateFormat"
@ -798,7 +798,7 @@ public class TestQueryParser extends LocalizedTestCase {
public void testBoost()
throws Exception {
Set stopWords = new HashSet(1);
Set<Object> stopWords = new HashSet<Object>(1);
stopWords.add("on");
StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords);
QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", oneStopAnalyzer);

View File

@ -18,7 +18,6 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
import java.util.BitSet;
import java.util.WeakHashMap;
import junit.framework.TestCase;
@ -46,11 +45,11 @@ public class CachingWrapperFilterHelper extends CachingWrapperFilter {
@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
if (cache == null) {
cache = new WeakHashMap();
cache = new WeakHashMap<IndexReader,DocIdSet>();
}
synchronized (cache) { // check cache
DocIdSet cached = (DocIdSet) cache.get(reader);
DocIdSet cached = cache.get(reader);
if (shouldHaveCache) {
TestCase.assertNotNull("Cache should have data ", cached);
} else {

View File

@ -45,7 +45,7 @@ public class CheckHits {
throws IOException {
String d = q.toString(defaultFieldName);
Set ignore = new TreeSet();
Set<Integer> ignore = new TreeSet<Integer>();
for (int i = 0; i < results.length; i++) {
ignore.add(Integer.valueOf(results[i]));
}
@ -85,11 +85,11 @@ public class CheckHits {
QueryUtils.check(query,searcher);
Set correct = new TreeSet();
Set<Integer> correct = new TreeSet<Integer>();
for (int i = 0; i < results.length; i++) {
correct.add(Integer.valueOf(results[i]));
}
final Set actual = new TreeSet();
final Set<Integer> actual = new TreeSet<Integer>();
final Collector c = new SetCollector(actual);
searcher.search(query, c);
@ -117,8 +117,8 @@ public class CheckHits {
}
public static class SetCollector extends Collector {
final Set bag;
public SetCollector(Set bag) {
final Set<Integer> bag;
public SetCollector(Set<Integer> bag) {
this.bag = bag;
}
private int base = 0;
@ -161,12 +161,12 @@ public class CheckHits {
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Set correct = new TreeSet();
Set<Integer> correct = new TreeSet<Integer>();
for (int i = 0; i < results.length; i++) {
correct.add(Integer.valueOf(results[i]));
}
Set actual = new TreeSet();
Set<Integer> actual = new TreeSet<Integer>();
for (int i = 0; i < hits.length; i++) {
actual.add(Integer.valueOf(hits[i].doc));
}
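A small illustrative sketch of the typed-Set pattern in the CheckHits hunks above: expected and actual doc ids are boxed into Set<Integer> via Integer.valueOf, so the later comparison needs no casts. The doc id values below are made up:

import java.util.Set;
import java.util.TreeSet;

public class TypedSetSketch {
  public static void main(String[] args) {
    int[] expectedDocs = {4, 6, 8};
    int[] foundDocs = {8, 4, 6};

    Set<Integer> correct = new TreeSet<Integer>();   // was: Set correct = new TreeSet();
    for (int doc : expectedDocs) {
      correct.add(Integer.valueOf(doc));
    }

    Set<Integer> actual = new TreeSet<Integer>();
    for (int doc : foundDocs) {
      actual.add(Integer.valueOf(doc));
    }

    // TreeSet orders both sets, so equals() compares contents regardless of insertion order
    System.out.println("match = " + correct.equals(actual));
  }
}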

View File

@ -419,9 +419,9 @@ final class JustCompileSearch {
}
}
static final class JustCompileTopDocsCollector extends TopDocsCollector {
static final class JustCompileTopDocsCollector extends TopDocsCollector<ScoreDoc> {
protected JustCompileTopDocsCollector(PriorityQueue pq) {
protected JustCompileTopDocsCollector(PriorityQueue<ScoreDoc> pq) {
super(pq);
}

View File

@ -352,7 +352,7 @@ public class QueryUtils {
List<IndexReader> readerList = new ArrayList<IndexReader>();
ReaderUtil.gatherSubReaders(readerList, s.getIndexReader());
IndexReader[] readers = (IndexReader[]) readerList.toArray(new IndexReader[0]);
IndexReader[] readers = readerList.toArray(new IndexReader[0]);
for(int i = 0; i < readers.length; i++) {
IndexReader reader = readers[i];
Weight w = q.weight(s);
@ -413,7 +413,7 @@ public class QueryUtils {
List<IndexReader> readerList = new ArrayList<IndexReader>();
ReaderUtil.gatherSubReaders(readerList, s.getIndexReader());
IndexReader[] readers = (IndexReader[]) readerList.toArray(new IndexReader[0]);
IndexReader[] readers = readerList.toArray(new IndexReader[0]);
for(int i = 0; i < readers.length; i++) {
IndexReader reader = readers[i];
Weight w = q.weight(s);

View File

@ -18,24 +18,6 @@ package org.apache.lucene.search;
*/
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.queryParser.ParseException;
import junit.framework.TestCase;
import java.util.Random;
import java.util.BitSet;
/**
* subclass of TestSimpleExplanations that verifies non matches.

View File

@ -153,7 +153,7 @@ implements Serializable {
// make a query without sorting first
ScoreDoc[] hitsByRank = searcher.search(query, null, 1000).scoreDocs;
checkHits(hitsByRank, "Sort by rank: "); // check for duplicates
Map resultMap = new TreeMap();
Map<Integer,Integer> resultMap = new TreeMap<Integer,Integer>();
// store hits in TreeMap - TreeMap does not allow duplicates; existing entries are silently overwritten
for(int hitid=0;hitid<hitsByRank.length; ++hitid) {
resultMap.put(
@ -190,7 +190,7 @@ implements Serializable {
*/
private void checkHits(ScoreDoc[] hits, String prefix) {
if(hits!=null) {
Map idMap = new TreeMap();
Map<Integer,Integer> idMap = new TreeMap<Integer,Integer>();
for(int docnum=0;docnum<hits.length;++docnum) {
Integer luceneId = null;
@ -200,7 +200,7 @@ implements Serializable {
message.append("Duplicate key for hit index = ");
message.append(docnum);
message.append(", previous index = ");
message.append(((Integer)idMap.get(luceneId)).toString());
message.append((idMap.get(luceneId)).toString());
message.append(", Lucene ID = ");
message.append(luceneId);
log(message.toString());

View File

@ -77,7 +77,7 @@ public class TestDocIdSet extends LuceneTestCase {
};
DocIdSetIterator iter = filteredSet.iterator();
ArrayList/*<Integer>*/ list = new ArrayList/*<Integer>*/();
ArrayList<Integer> list = new ArrayList<Integer>();
int doc = iter.advance(3);
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
list.add(Integer.valueOf(doc));
@ -88,9 +88,9 @@ public class TestDocIdSet extends LuceneTestCase {
int[] docs = new int[list.size()];
int c=0;
Iterator/*<Integer>*/ intIter = list.iterator();
Iterator<Integer> intIter = list.iterator();
while(intIter.hasNext()) {
docs[c++] = ((Integer) intIter.next()).intValue();
docs[c++] = intIter.next().intValue();
}
int[] answer = new int[]{4,6,8};
boolean same = Arrays.equals(answer, docs);

View File

@ -30,7 +30,7 @@ import java.util.Map;
public class TestElevationComparator extends LuceneTestCase {
private final Map/*<String, Integer>*/ priority = new HashMap/*<String, Integer>*/();
private final Map<String,Integer> priority = new HashMap<String,Integer>();
//@Test
public void testSorting() throws Throwable {
@ -126,9 +126,9 @@ public class TestElevationComparator extends LuceneTestCase {
}
class ElevationComparatorSource extends FieldComparatorSource {
private final Map/*<String, Integer>*/ priority;
private final Map<String,Integer> priority;
public ElevationComparatorSource(final Map/*<String, Integer>*/ boosts) {
public ElevationComparatorSource(final Map<String,Integer> boosts) {
this.priority = boosts;
}
@ -152,7 +152,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
private int docVal(int doc) throws IOException {
String id = idIndex.lookup[idIndex.order[doc]];
Integer prio = (Integer) priority.get(id);
Integer prio = priority.get(id);
return prio == null ? 0 : prio.intValue();
}
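In the TestElevationComparator hunks above, the commented pseudo-generics (Map/*<String, Integer>*/) become real type parameters, so get() returns Integer without a cast. An illustrative sketch with hypothetical ids:

import java.util.HashMap;
import java.util.Map;

public class PriorityMapSketch {
  // was: private final Map/*<String, Integer>*/ priority = new HashMap/*<String, Integer>*/();
  private final Map<String, Integer> priority = new HashMap<String, Integer>();

  PriorityMapSketch() {
    priority.put("doc-a", Integer.valueOf(3));
    priority.put("doc-b", Integer.valueOf(1));
  }

  int docVal(String id) {
    Integer prio = priority.get(id);   // was: (Integer) priority.get(id)
    return prio == null ? 0 : prio.intValue();
  }

  public static void main(String[] args) {
    PriorityMapSketch s = new PriorityMapSketch();
    System.out.println(s.docVal("doc-a"));    // 3
    System.out.println(s.docVal("missing"));  // 0
  }
}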

View File

@ -18,8 +18,7 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@ -66,7 +65,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
Query q = new TermQuery(new Term("body","body"));
// test id, bounded on both ends
FieldCacheRangeFilter fcrf;
FieldCacheRangeFilter<String> fcrf;
result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
assertEquals("find all", numDocs, result.length);
@ -213,7 +212,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
Query q = new TermQuery(new Term("body","body"));
// test id, bounded on both ends
FieldCacheRangeFilter fcrf;
FieldCacheRangeFilter<Short> fcrf;
result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
assertEquals("find all", numDocs, result.length);
@ -305,7 +304,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
// test id, bounded on both ends
FieldCacheRangeFilter fcrf;
FieldCacheRangeFilter<Integer> fcrf;
result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
assertEquals("find all", numDocs, result.length);
@ -397,7 +396,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
// test id, bounded on both ends
FieldCacheRangeFilter fcrf;
FieldCacheRangeFilter<Long> fcrf;
result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
assertEquals("find all", numDocs, result.length);
@ -550,7 +549,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertTrue(reader.hasDeletions());
ScoreDoc[] result;
FieldCacheRangeFilter fcrf;
FieldCacheRangeFilter<Byte> fcrf;
Query q = new TermQuery(new Term("body","body"));
result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;

View File

@ -54,20 +54,20 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
ScoreDoc[] results;
MatchAllDocsQuery q = new MatchAllDocsQuery();
List terms = new ArrayList();
List<String> terms = new ArrayList<String>();
terms.add("5");
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, (String[]) terms.toArray(new String[0])), numDocs).scoreDocs;
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
assertEquals("Must match nothing", 0, results.length);
terms = new ArrayList();
terms = new ArrayList<String>();
terms.add("10");
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, (String[]) terms.toArray(new String[0])), numDocs).scoreDocs;
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
assertEquals("Must match 1", 1, results.length);
terms = new ArrayList();
terms = new ArrayList<String>();
terms.add("10");
terms.add("20");
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, (String[]) terms.toArray(new String[0])), numDocs).scoreDocs;
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
assertEquals("Must match 2", 2, results.length);
reader.close();
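The TestFieldCacheTermsFilter hunks above drop the (String[]) cast because List<String>.toArray(new String[0]) is already typed as String[]; the readerList.toArray and termsWithPrefix.toArray changes elsewhere in this commit follow the same rule. A minimal sketch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TypedToArraySketch {
  public static void main(String[] args) {
    List<String> terms = new ArrayList<String>();   // was: List terms = new ArrayList();
    terms.add("10");
    terms.add("20");

    // was: (String[]) terms.toArray(new String[0])
    String[] asArray = terms.toArray(new String[0]);

    System.out.println(Arrays.toString(asArray));
  }
}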

View File

@ -17,7 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.index.IndexReader;
@ -31,7 +30,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Collections;
@ -68,7 +66,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
query1.add(new Term("body", "blueberry"));
query2.add(new Term("body", "strawberry"));
LinkedList termsWithPrefix = new LinkedList();
LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
IndexReader ir = IndexReader.open(indexStore, true);
// this TermEnum gives "piccadilly", "pie" and "pizza".
@ -81,9 +79,9 @@ public class TestMultiPhraseQuery extends LuceneTestCase
}
} while (te.next());
query1.add((Term[])termsWithPrefix.toArray(new Term[0]));
query1.add(termsWithPrefix.toArray(new Term[0]));
assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
query2.add((Term[])termsWithPrefix.toArray(new Term[0]));
query2.add(termsWithPrefix.toArray(new Term[0]));
assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString());
ScoreDoc[] result;
@ -103,7 +101,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
termsWithPrefix.add(te.term());
}
} while (te.next());
query3.add((Term[])termsWithPrefix.toArray(new Term[0]));
query3.add(termsWithPrefix.toArray(new Term[0]));
query3.add(new Term("body", "pizza"));
result = searcher.search(query3, null, 1000).scoreDocs;

View File

@ -253,9 +253,9 @@ public class TestMultiSearcher extends LuceneTestCase
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
//Should be one document from each directory
//they both have two fields, contents and other
Set ftl = new HashSet();
Set<String> ftl = new HashSet<String>();
ftl.add("other");
SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections.EMPTY_SET);
SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections. <String> emptySet());
document = searcher.doc(hits[0].doc, fs);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
@ -265,7 +265,7 @@ public class TestMultiSearcher extends LuceneTestCase
assertTrue("value is null and it shouldn't be", value != null);
ftl.clear();
ftl.add("contents");
fs = new SetBasedFieldSelector(ftl, Collections.EMPTY_SET);
fs = new SetBasedFieldSelector(ftl, Collections. <String> emptySet());
document = searcher.doc(hits[1].doc, fs);
value = document.get("contents");
assertTrue("value is null and it shouldn't be", value != null);

View File

@ -76,7 +76,7 @@ public class TestPhrasePrefixQuery
query1.add(new Term("body", "blueberry"));
query2.add(new Term("body", "strawberry"));
LinkedList termsWithPrefix = new LinkedList();
LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
IndexReader ir = IndexReader.open(indexStore, true);
// this TermEnum gives "piccadilly", "pie" and "pizza".
@ -89,8 +89,8 @@ public class TestPhrasePrefixQuery
}
} while (te.next());
query1.add((Term[])termsWithPrefix.toArray(new Term[0]));
query2.add((Term[])termsWithPrefix.toArray(new Term[0]));
query1.add(termsWithPrefix.toArray(new Term[0]));
query2.add(termsWithPrefix.toArray(new Term[0]));
ScoreDoc[] result;
result = searcher.search(query1, null, 1000).scoreDocs;

View File

@ -66,7 +66,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
}
Scorer s = new SimpleScorer();
TopDocsCollector tdc = TopScoreDocCollector.create(scores.length, true);
TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(scores.length, true);
Collector c = new PositiveScoresOnlyCollector(tdc);
c.setScorer(s);
while (s.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {

View File

@ -18,24 +18,7 @@ package org.apache.lucene.search;
*/
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.queryParser.ParseException;
import junit.framework.TestCase;
import java.util.Random;
import java.util.BitSet;
/**
* subclass of TestSimpleExplanations that verifies non matches.

View File

@ -634,9 +634,9 @@ public class TestSort extends LuceneTestCase implements Serializable {
public void testNormalizedScores() throws Exception {
// capture relevancy scores
HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
// we'll test searching locally, remote and multi
@ -977,9 +977,9 @@ public class TestSort extends LuceneTestCase implements Serializable {
assertEquals (expectedResult, buff.toString());
}
private HashMap getScores (ScoreDoc[] hits, Searcher searcher)
private HashMap<String,Float> getScores (ScoreDoc[] hits, Searcher searcher)
throws IOException {
HashMap scoreMap = new HashMap();
HashMap<String,Float> scoreMap = new HashMap<String,Float>();
int n = hits.length;
for (int i=0; i<n; ++i) {
Document doc = searcher.doc(hits[i].doc);
@ -991,15 +991,15 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
// make sure all the values in the maps match
private void assertSameValues (HashMap m1, HashMap m2) {
private <K, V> void assertSameValues (HashMap<K,V> m1, HashMap<K,V> m2) {
int n = m1.size();
int m = m2.size();
assertEquals (n, m);
Iterator iter = m1.keySet().iterator();
Iterator<K> iter = m1.keySet().iterator();
while (iter.hasNext()) {
Object key = iter.next();
Object o1 = m1.get(key);
Object o2 = m2.get(key);
K key = iter.next();
V o1 = m1.get(key);
V o2 = m2.get(key);
if (o1 instanceof Float) {
assertEquals(((Float)o1).floatValue(), ((Float)o2).floatValue(), 1e-6);
} else {
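The assertSameValues change above turns a raw-typed helper into a generic method. A standalone sketch of that pattern follows; the maps and the plain equals() check are illustrative only, not the test's tolerance-based float comparison:

import java.util.HashMap;

public class GenericMethodSketch {
  // was: private void assertSameValues(HashMap m1, HashMap m2) with Object keys/values
  static <K, V> void assertSameValues(HashMap<K, V> m1, HashMap<K, V> m2) {
    if (m1.size() != m2.size()) {
      throw new AssertionError("sizes differ");
    }
    for (K key : m1.keySet()) {          // was: Iterator iter = m1.keySet().iterator()
      V o1 = m1.get(key);                // typed values, no casts
      V o2 = m2.get(key);
      if (!o1.equals(o2)) {
        throw new AssertionError("values differ for key " + key);
      }
    }
  }

  public static void main(String[] args) {
    HashMap<String, Float> a = new HashMap<String, Float>();
    HashMap<String, Float> b = new HashMap<String, Float>();
    a.put("X", Float.valueOf(1.5f));
    b.put("X", Float.valueOf(1.5f));
    assertSameValues(a, b);
    System.out.println("maps match");
  }
}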

View File

@ -16,7 +16,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.analysis.SimpleAnalyzer;
@ -58,12 +57,11 @@ public class TestSpanQueryFilter extends LuceneTestCase {
DocIdSet docIdSet = result.getDocIdSet();
assertTrue("docIdSet is null and it shouldn't be", docIdSet != null);
assertContainsDocId("docIdSet doesn't contain docId 10", docIdSet, 10);
List spans = result.getPositions();
List<SpanFilterResult.PositionInfo> spans = result.getPositions();
assertTrue("spans is null and it shouldn't be", spans != null);
int size = getDocIdSetSize(docIdSet);
assertTrue("spans Size: " + spans.size() + " is not: " + size, spans.size() == size);
for (Iterator iterator = spans.iterator(); iterator.hasNext();) {
SpanFilterResult.PositionInfo info = (SpanFilterResult.PositionInfo) iterator.next();
for (final SpanFilterResult.PositionInfo info: spans) {
assertTrue("info is null and it shouldn't be", info != null);
//The doc should indicate the bit is on
assertContainsDocId("docIdSet doesn't contain docId " + info.getDoc(), docIdSet, info.getDoc());

View File

@ -21,7 +21,6 @@ import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;

View File

@ -76,7 +76,7 @@ public class TestTermScorer extends LuceneTestCase
indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
indexReader.norms(FIELD));
//we have 2 documents with the term all in them, one document for all the other values
final List docs = new ArrayList();
final List<TestHit> docs = new ArrayList<TestHit>();
//must call next first
@ -107,8 +107,8 @@ public class TestTermScorer extends LuceneTestCase
}
});
assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
TestHit doc0 = (TestHit) docs.get(0);
TestHit doc5 = (TestHit) docs.get(1);
TestHit doc0 = docs.get(0);
TestHit doc5 = docs.get(1);
//The scores should be the same
assertTrue(doc0.score + " does not equal: " + doc5.score, doc0.score == doc5.score);
/*

View File

@ -28,7 +28,6 @@ import org.apache.lucene.util.English;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.SortedSet;
@ -167,7 +166,6 @@ public class TestTermVectors extends LuceneTestCase {
}
else{
try{
TermPositionVector posVec = (TermPositionVector)vector[0];
assertTrue(false);
}
catch(ClassCastException ignore){
@ -208,7 +206,7 @@ public class TestTermVectors extends LuceneTestCase {
String test2 = "computer in a computer lab"; //5 terms
String test3 = "a chocolate lab grows old"; //5 terms
String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
Map test4Map = new HashMap();
Map<String,Integer> test4Map = new HashMap<String,Integer>();
test4Map.put("chocolate", Integer.valueOf(3));
test4Map.put("lab", Integer.valueOf(2));
test4Map.put("eating", Integer.valueOf(1));
@ -246,7 +244,7 @@ public class TestTermVectors extends LuceneTestCase {
TermDocs termDocs = knownSearcher.reader.termDocs();
//System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
Similarity sim = knownSearcher.getSimilarity();
//Similarity sim = knownSearcher.getSimilarity();
while (termEnum.next() == true)
{
Term term = termEnum.term();
@ -258,11 +256,11 @@ public class TestTermVectors extends LuceneTestCase {
int freq = termDocs.freq();
//System.out.println("Doc Id: " + docId + " freq " + freq);
TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
float tf = sim.tf(freq);
float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
//float tf = sim.tf(freq);
//float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
//float qNorm = sim.queryNorm()
//This is fine since we don't have stop words
float lNorm = sim.lengthNorm("field", vector.getTerms().length);
//float lNorm = sim.lengthNorm("field", vector.getTerms().length);
//float coord = sim.coord()
//System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
assertTrue(vector != null);
@ -283,7 +281,6 @@ public class TestTermVectors extends LuceneTestCase {
ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
//doc 3 should be the first hit b/c it is the shortest match
assertTrue(hits.length == 3);
float score = hits[0].score;
/*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
@ -304,21 +301,20 @@ public class TestTermVectors extends LuceneTestCase {
//System.out.println("Term: " + term);
int freq = freqs[i];
assertTrue(test4.indexOf(term) != -1);
Integer freqInt = (Integer)test4Map.get(term);
Integer freqInt = test4Map.get(term);
assertTrue(freqInt != null);
assertTrue(freqInt.intValue() == freq);
}
SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
SortedSet vectorEntrySet = mapper.getTermVectorEntrySet();
SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
TermVectorEntry last = null;
for (Iterator iterator = vectorEntrySet.iterator(); iterator.hasNext();) {
TermVectorEntry tve = (TermVectorEntry) iterator.next();
for (final TermVectorEntry tve : vectorEntrySet) {
if (tve != null && last != null)
{
assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
Integer expectedFreq = (Integer) test4Map.get(tve.getTerm());
Integer expectedFreq = test4Map.get(tve.getTerm());
//we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
}
@ -328,9 +324,9 @@ public class TestTermVectors extends LuceneTestCase {
FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
Map map = fieldMapper.getFieldToTerms();
Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
vectorEntrySet = (SortedSet) map.get("field");
vectorEntrySet = map.get("field");
assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
knownSearcher.close();

View File

@ -85,9 +85,8 @@ public class TestThreadSafe extends LuceneTestCase {
}
);
List fields = doc.getFields();
for (int i=0; i<fields.size(); i++) {
Fieldable f = (Fieldable)fields.get(i);
List<Fieldable> fields = doc.getFields();
for (final Fieldable f : fields ) {
validateField(f);
}

View File

@ -30,7 +30,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestTopDocsCollector extends LuceneTestCase {
private static final class MyTopsDocCollector extends TopDocsCollector {
private static final class MyTopsDocCollector extends TopDocsCollector<ScoreDoc> {
private int idx = 0;
private int base = 0;
@ -50,7 +50,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
maxScore = results[0].score;
} else {
for (int i = pq.size(); i > 1; i--) { pq.pop(); }
maxScore = ((ScoreDoc) pq.pop()).score;
maxScore = pq.pop().score;
}
return new TopDocs(totalHits, results, maxScore);
@ -94,10 +94,10 @@ public class TestTopDocsCollector extends LuceneTestCase {
private Directory dir = new RAMDirectory();
private TopDocsCollector doSearch(int numResults) throws IOException {
private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
Query q = new MatchAllDocsQuery();
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocsCollector tdc = new MyTopsDocCollector(numResults);
TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
searcher.search(q, tdc);
searcher.close();
return tdc;
@ -125,7 +125,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
public void testInvalidArguments() throws Exception {
int numResults = 5;
TopDocsCollector tdc = doSearch(numResults);
TopDocsCollector<ScoreDoc> tdc = doSearch(numResults);
// start < 0
assertEquals(0, tdc.topDocs(-1).scoreDocs.length);
@ -145,17 +145,17 @@ public class TestTopDocsCollector extends LuceneTestCase {
}
public void testZeroResults() throws Exception {
TopDocsCollector tdc = new MyTopsDocCollector(5);
TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(5);
assertEquals(0, tdc.topDocs(0, 1).scoreDocs.length);
}
public void testFirstResultsPage() throws Exception {
TopDocsCollector tdc = doSearch(15);
TopDocsCollector<ScoreDoc> tdc = doSearch(15);
assertEquals(10, tdc.topDocs(0, 10).scoreDocs.length);
}
public void testSecondResultsPages() throws Exception {
TopDocsCollector tdc = doSearch(15);
TopDocsCollector<ScoreDoc> tdc = doSearch(15);
// ask for more results than are available
assertEquals(5, tdc.topDocs(10, 10).scoreDocs.length);
@ -169,12 +169,12 @@ public class TestTopDocsCollector extends LuceneTestCase {
}
public void testGetAllResults() throws Exception {
TopDocsCollector tdc = doSearch(15);
TopDocsCollector<ScoreDoc> tdc = doSearch(15);
assertEquals(15, tdc.topDocs().scoreDocs.length);
}
public void testGetResultsFromStart() throws Exception {
TopDocsCollector tdc = doSearch(15);
TopDocsCollector<ScoreDoc> tdc = doSearch(15);
// should bring all results
assertEquals(15, tdc.topDocs(0).scoreDocs.length);
@ -185,7 +185,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
public void testMaxScore() throws Exception {
// ask for all results
TopDocsCollector tdc = doSearch(15);
TopDocsCollector<ScoreDoc> tdc = doSearch(15);
TopDocs td = tdc.topDocs();
assertEquals(MAX_SCORE, td.getMaxScore(), 0f);
@ -198,7 +198,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
// This does not test the PQ's correctness, but whether topDocs()
// implementations return the results in decreasing score order.
public void testResultsOrder() throws Exception {
TopDocsCollector tdc = doSearch(15);
TopDocsCollector<ScoreDoc> tdc = doSearch(15);
ScoreDoc[] sd = tdc.topDocs().scoreDocs;
assertEquals(MAX_SCORE, sd[0].score, 0f);
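The TestTopDocsCollector hunks above parameterize the collector hierarchy so pq.pop() no longer needs a ScoreDoc cast. A small sketch of the same class-level pattern with hypothetical Hit/Collector names, not the actual Lucene classes:

import java.util.ArrayDeque;
import java.util.Deque;

public class TypedCollectorSketch {
  static class Hit {
    final float score;
    Hit(float score) { this.score = score; }
  }

  // hypothetical generic base class, analogous to the parameterized collector above
  static abstract class Collector<T> {
    protected final Deque<T> pq = new ArrayDeque<T>();
    void collect(T t) { pq.push(t); }
  }

  // subclass fixes the type parameter, so pq.pop() is already a Hit
  static class HitCollector extends Collector<Hit> {
    float lastScore() {
      return pq.pop().score;   // was: ((ScoreDoc) pq.pop()).score
    }
  }

  public static void main(String[] args) {
    HitCollector c = new HitCollector();
    c.collect(new Hit(0.5f));
    c.collect(new Hit(0.9f));
    System.out.println(c.lastScore());
  }
}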

View File

@ -59,7 +59,7 @@ public class TestTopScoreDocCollector extends LuceneTestCase {
bq.setMinimumNumberShouldMatch(1);
IndexSearcher searcher = new IndexSearcher(dir, true);
for (int i = 0; i < inOrder.length; i++) {
TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]);
TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(3, inOrder[i]);
assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());
searcher.search(new MatchAllDocsQuery(), tdc);

View File

@ -19,7 +19,6 @@ package org.apache.lucene.search.function;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.queryParser.QueryParser;
@ -184,11 +183,11 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
TopDocs td5CustomMulAdd = s.search(q5CustomMulAdd,null,1000);
// put results in map so we can verify the scores although they have changed
HashMap h1 = topDocsToMap(td1);
HashMap h2CustomNeutral = topDocsToMap(td2CustomNeutral);
HashMap h3CustomMul = topDocsToMap(td3CustomMul);
HashMap h4CustomAdd = topDocsToMap(td4CustomAdd);
HashMap h5CustomMulAdd = topDocsToMap(td5CustomMulAdd);
HashMap<Integer,Float> h1 = topDocsToMap(td1);
HashMap<Integer,Float> h2CustomNeutral = topDocsToMap(td2CustomNeutral);
HashMap<Integer,Float> h3CustomMul = topDocsToMap(td3CustomMul);
HashMap<Integer,Float> h4CustomAdd = topDocsToMap(td4CustomAdd);
HashMap<Integer,Float> h5CustomMulAdd = topDocsToMap(td5CustomMulAdd);
verifyResults(boost, s,
h1, h2CustomNeutral, h3CustomMul, h4CustomAdd, h5CustomMulAdd,
@ -197,7 +196,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
// verify results are as expected.
private void verifyResults(float boost, IndexSearcher s,
HashMap h1, HashMap h2customNeutral, HashMap h3CustomMul, HashMap h4CustomAdd, HashMap h5CustomMulAdd,
HashMap<Integer,Float> h1, HashMap<Integer,Float> h2customNeutral, HashMap<Integer,Float> h3CustomMul, HashMap<Integer,Float> h4CustomAdd, HashMap<Integer,Float> h5CustomMulAdd,
Query q1, Query q2, Query q3, Query q4, Query q5) throws Exception {
// verify numbers of matches
@ -214,8 +213,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
QueryUtils.check(q5,s);
// verify scores ratios
for (Iterator it = h1.keySet().iterator(); it.hasNext();) {
Integer x = (Integer) it.next();
for (final Integer x : h1.keySet()) {
int doc = x.intValue();
log("doc = "+doc);
@ -224,22 +222,22 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
log("fieldScore = "+fieldScore);
assertTrue("fieldScore should not be 0",fieldScore>0);
float score1 = ((Float)h1.get(x)).floatValue();
float score1 = h1.get(x).floatValue();
logResult("score1=", s, q1, doc, score1);
float score2 = ((Float)h2customNeutral.get(x)).floatValue();
float score2 = h2customNeutral.get(x).floatValue();
logResult("score2=", s, q2, doc, score2);
assertEquals("same score (just boosted) for neutral", boost * score1, score2, TEST_SCORE_TOLERANCE_DELTA);
float score3 = ((Float)h3CustomMul.get(x)).floatValue();
float score3 = h3CustomMul.get(x).floatValue();
logResult("score3=", s, q3, doc, score3);
assertEquals("new score for custom mul", boost * fieldScore * score1, score3, TEST_SCORE_TOLERANCE_DELTA);
float score4 = ((Float)h4CustomAdd.get(x)).floatValue();
float score4 = h4CustomAdd.get(x).floatValue();
logResult("score4=", s, q4, doc, score4);
assertEquals("new score for custom add", boost * (fieldScore + score1), score4, TEST_SCORE_TOLERANCE_DELTA);
float score5 = ((Float)h5CustomMulAdd.get(x)).floatValue();
float score5 = h5CustomMulAdd.get(x).floatValue();
logResult("score5=", s, q5, doc, score5);
assertEquals("new score for custom mul add", boost * fieldScore * (score1 + fieldScore), score5, TEST_SCORE_TOLERANCE_DELTA);
}
@ -253,8 +251,8 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
// since custom scoring modifies the order of docs, map results
// by doc ids so that we can later compare/verify them
private HashMap topDocsToMap(TopDocs td) {
HashMap h = new HashMap();
private HashMap<Integer,Float> topDocsToMap(TopDocs td) {
HashMap<Integer,Float> h = new HashMap<Integer,Float>();
for (int i=0; i<td.totalHits; i++) {
h.put(Integer.valueOf(td.scoreDocs[i].doc), Float.valueOf(td.scoreDocs[i].score));
}

View File

@ -18,7 +18,6 @@ package org.apache.lucene.search.function;
*/
import org.apache.lucene.util.LuceneTestCase;
import junit.framework.Assert;
/**
* DocValues TestCase

View File

@ -157,7 +157,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
// Test that values loaded for FieldScoreQuery are cached properly and consumes the proper RAM resources.
private void doTestCaching (String field, FieldScoreQuery.Type tp) throws CorruptIndexException, Exception {
// prepare expected array types for comparison
HashMap expectedArrayTypes = new HashMap();
HashMap<FieldScoreQuery.Type,Object> expectedArrayTypes = new HashMap<FieldScoreQuery.Type,Object>();
expectedArrayTypes.put(FieldScoreQuery.Type.BYTE, new byte[0]);
expectedArrayTypes.put(FieldScoreQuery.Type.SHORT, new short[0]);
expectedArrayTypes.put(FieldScoreQuery.Type.INT, new int[0]);

View File

@ -21,7 +21,6 @@ import java.util.Collection;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@ -91,7 +90,6 @@ public class TestPayloadNearQuery extends LuceneTestCase {
}
private PayloadNearQuery newPhraseQuery (String fieldName, String phrase, boolean inOrder) {
int n;
String[] words = phrase.split("[\\s]+");
SpanQuery clauses[] = new SpanQuery[words.length];
for (int i=0;i<clauses.length;i++) {
@ -159,7 +157,6 @@ public class TestPayloadNearQuery extends LuceneTestCase {
public void testPayloadNear() throws IOException {
SpanNearQuery q1, q2;
PayloadNearQuery query;
TopDocs hits;
//SpanNearQuery(clauses, 10000, false)
q1 = spanNearQuery("field2", "twenty two");
q2 = spanNearQuery("field2", "twenty three");

View File

@ -63,7 +63,7 @@ final class JustCompileSearchSpans {
}
@Override
public Collection getPayload() throws IOException {
public Collection<byte[]> getPayload() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@ -96,7 +96,7 @@ final class JustCompileSearchSpans {
static final class JustCompilePayloadSpans extends Spans {
@Override
public Collection getPayload() throws IOException {
public Collection<byte[]> getPayload() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}

View File

@ -168,7 +168,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
QueryUtils.checkEqual(q, qr);
HashSet set = new HashSet();
HashSet<Term> set = new HashSet<Term>();
qr.extractTerms(set);
assertEquals(2, set.size());
}

View File

@ -21,7 +21,6 @@ import java.io.Reader;
import java.io.StringReader;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
@ -271,13 +270,13 @@ public class TestPayloadSpans extends LuceneTestCase {
Spans spans = snq.getSpans(is.getIndexReader());
TopDocs topDocs = is.search(snq, 1);
Set payloadSet = new HashSet();
Set<String> payloadSet = new HashSet<String>();
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
while (spans.next()) {
Collection payloads = spans.getPayload();
Collection<byte[]> payloads = spans.getPayload();
for (Iterator it = payloads.iterator(); it.hasNext();) {
payloadSet.add(new String((byte[]) it.next()));
for (final byte [] payload : payloads) {
payloadSet.add(new String(payload));
}
}
}
@ -305,12 +304,12 @@ public class TestPayloadSpans extends LuceneTestCase {
Spans spans = snq.getSpans(is.getIndexReader());
TopDocs topDocs = is.search(snq, 1);
Set payloadSet = new HashSet();
Set<String> payloadSet = new HashSet<String>();
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
while (spans.next()) {
Collection payloads = spans.getPayload();
for (Iterator it = payloads.iterator(); it.hasNext();) {
payloadSet.add(new String((byte[]) it.next()));
Collection<byte[]> payloads = spans.getPayload();
for (final byte[] payload : payloads) {
payloadSet.add(new String(payload));
}
}
}
@ -338,22 +337,21 @@ public class TestPayloadSpans extends LuceneTestCase {
Spans spans = snq.getSpans(is.getIndexReader());
TopDocs topDocs = is.search(snq, 1);
Set payloadSet = new HashSet();
Set<String> payloadSet = new HashSet<String>();
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
while (spans.next()) {
Collection payloads = spans.getPayload();
Collection<byte[]> payloads = spans.getPayload();
for (Iterator it = payloads.iterator(); it.hasNext();) {
payloadSet.add(new String((byte[]) it.next()));
for (final byte [] payload : payloads) {
payloadSet.add(new String(payload));
}
}
}
assertEquals(2, payloadSet.size());
if(DEBUG) {
Iterator pit = payloadSet.iterator();
while (pit.hasNext()) {
System.out.println("match:" + pit.next());
}
for (final String payload : payloadSet)
System.out.println("match:" + payload);
}
assertTrue(payloadSet.contains("a:Noise:10"));
assertTrue(payloadSet.contains("k:Noise:11"));
@ -375,12 +373,10 @@ public class TestPayloadSpans extends LuceneTestCase {
IndexReader reader = searcher.getIndexReader();
PayloadSpanUtil psu = new PayloadSpanUtil(reader);
Collection payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
if(DEBUG)
System.out.println("Num payloads:" + payloads.size());
Iterator it = payloads.iterator();
while(it.hasNext()) {
byte[] bytes = (byte[]) it.next();
for (final byte [] bytes : payloads) {
if(DEBUG)
System.out.println(new String(bytes));
}
@ -405,10 +401,9 @@ public class TestPayloadSpans extends LuceneTestCase {
}
//See payload helper, for the PayloadHelper.FIELD field, there is a single byte payload at every token
if (spans.isPayloadAvailable()) {
Collection payload = spans.getPayload();
Collection<byte[]> payload = spans.getPayload();
assertTrue("payload Size: " + payload.size() + " is not: " + expectedNumPayloads, payload.size() == expectedNumPayloads);
for (Iterator iterator = payload.iterator(); iterator.hasNext();) {
byte[] thePayload = (byte[]) iterator.next();
for (final byte [] thePayload : payload) {
assertTrue("payload[0] Size: " + thePayload.length + " is not: " + expectedPayloadLength,
thePayload.length == expectedPayloadLength);
assertTrue(thePayload[0] + " does not equal: " + expectedFirstByte, thePayload[0] == expectedFirstByte);
@ -450,12 +445,10 @@ public class TestPayloadSpans extends LuceneTestCase {
if(DEBUG)
System.out.println("\nSpans Dump --");
if (spans.isPayloadAvailable()) {
Collection payload = spans.getPayload();
Collection<byte[]> payload = spans.getPayload();
if(DEBUG)
System.out.println("payloads for span:" + payload.size());
Iterator it = payload.iterator();
while(it.hasNext()) {
byte[] bytes = (byte[]) it.next();
for (final byte [] bytes : payload) {
if(DEBUG)
System.out.println("doc:" + spans.doc() + " s:" + spans.start() + " e:" + spans.end() + " "
+ new String(bytes));
@ -484,8 +477,8 @@ public class TestPayloadSpans extends LuceneTestCase {
class PayloadFilter extends TokenFilter {
String fieldName;
int numSeen = 0;
Set entities = new HashSet();
Set nopayload = new HashSet();
Set<String> entities = new HashSet<String>();
Set<String> nopayload = new HashSet<String>();
int pos;
PayloadAttribute payloadAtt;
TermAttribute termAtt;
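The TestPayloadSpans hunks above repeatedly collapse an Iterator-plus-cast loop into an enhanced for over Collection<byte[]>. An illustrative, self-contained sketch with a hypothetical getPayload():

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public class PayloadLoopSketch {
  // hypothetical stand-in for Spans.getPayload()
  static Collection<byte[]> getPayload() {
    Collection<byte[]> payloads = new ArrayList<byte[]>();
    payloads.add("a:Noise:10".getBytes());
    payloads.add("k:Noise:11".getBytes());
    return payloads;
  }

  public static void main(String[] args) {
    Set<String> payloadSet = new HashSet<String>();   // was: Set payloadSet = new HashSet();

    // was: Iterator it = payloads.iterator(); byte[] bytes = (byte[]) it.next();
    for (byte[] payload : getPayload()) {
      payloadSet.add(new String(payload));
    }

    System.out.println(payloadSet.contains("a:Noise:10"));  // true
  }
}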

View File

@ -18,24 +18,7 @@ package org.apache.lucene.search.spans;
*/
import org.apache.lucene.search.*;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.queryParser.ParseException;
import junit.framework.TestCase;
import java.util.Random;
import java.util.BitSet;
/**
* TestExplanations subclass focusing on span queries

View File

@ -56,9 +56,9 @@ public class MockRAMDirectory extends RAMDirectory {
if (openFiles == null)
openFiles = new HashMap<String,Integer>();
if (createdFiles == null)
createdFiles = new HashSet();
createdFiles = new HashSet<String>();
if (unSyncedFiles == null)
unSyncedFiles = new HashSet();
unSyncedFiles = new HashSet<String>();
}
public MockRAMDirectory() {
@ -89,9 +89,9 @@ public class MockRAMDirectory extends RAMDirectory {
* unsynced files. */
public synchronized void crash() throws IOException {
crashed = true;
openFiles = new HashMap();
openFiles = new HashMap<String,Integer>();
Iterator<String> it = unSyncedFiles.iterator();
unSyncedFiles = new HashSet();
unSyncedFiles = new HashSet<String>();
int count = 0;
while(it.hasNext()) {
String name = it.next();
@ -264,7 +264,7 @@ public class MockRAMDirectory extends RAMDirectory {
@Override
public synchronized void close() {
if (openFiles == null) {
openFiles = new HashMap();
openFiles = new HashMap<String,Integer>();
}
if (noDeleteOpenFile && openFiles.size() > 0) {
// RuntimeException instead of IOException because
@ -311,7 +311,7 @@ public class MockRAMDirectory extends RAMDirectory {
}
}
ArrayList failures;
ArrayList<Failure> failures;
/**
* add a Failure object to the list of objects to be evaluated
@ -319,7 +319,7 @@ public class MockRAMDirectory extends RAMDirectory {
*/
synchronized public void failOn(Failure fail) {
if (failures == null) {
failures = new ArrayList();
failures = new ArrayList<Failure>();
}
failures.add(fail);
}

View File

@ -45,7 +45,7 @@ public class MockRAMInputStream extends RAMInputStream {
// all clones get closed:
if (!isClone) {
synchronized(dir) {
Integer v = (Integer) dir.openFiles.get(name);
Integer v = dir.openFiles.get(name);
// Could be null when MockRAMDirectory.crash() was called
if (v != null) {
if (v.intValue() == 1) {

View File

@ -22,7 +22,6 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
@ -287,7 +286,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
private static class MockFSDirectory extends Directory {
List allIndexInputs = new ArrayList();
List<IndexInput> allIndexInputs = new ArrayList<IndexInput>();
Random rand;
@ -305,10 +304,9 @@ public class TestBufferedIndexInput extends LuceneTestCase {
}
public void tweakBufferSizes() {
Iterator it = allIndexInputs.iterator();
//int count = 0;
while(it.hasNext()) {
BufferedIndexInput bii = (BufferedIndexInput) it.next();
for (final IndexInput ip : allIndexInputs) {
BufferedIndexInput bii = (BufferedIndexInput) ip;
int bufferSize = 1024+(int) Math.abs(rand.nextInt() % 32768);
bii.setBufferSize(bufferSize);
//count++;

View File

@ -33,7 +33,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase {
* @throws IOException
*/
public void testBasic() throws IOException {
Set fileExtensions = new HashSet();
Set<String> fileExtensions = new HashSet<String>();
fileExtensions.add("fdt");
fileExtensions.add("fdx");

View File

@ -31,13 +31,13 @@ public class TestHugeRamFile extends LuceneTestCase {
* buffers under maxint. */
private static class DenseRAMFile extends RAMFile {
private long capacity = 0;
private HashMap singleBuffers = new HashMap();
private HashMap<Integer,byte[]> singleBuffers = new HashMap<Integer,byte[]>();
@Override
byte[] newBuffer(int size) {
capacity += size;
if (capacity <= MAX_VALUE) {
// below maxint we reuse buffers
byte buf[] = (byte[]) singleBuffers.get(Integer.valueOf(size));
byte buf[] = singleBuffers.get(Integer.valueOf(size));
if (buf==null) {
buf = new byte[size];
//System.out.println("allocate: "+size);
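
As a hedged sketch of the buffer-reuse idea in this hunk (the class name and MAX_BYTES constant are illustrative stand-ins for DenseRAMFile and MAX_VALUE), the typed HashMap<Integer,byte[]> is what removes the (byte[]) cast:

import java.util.HashMap;

// Illustrative stand-in only: reuses byte[] buffers keyed by size.
class BufferPool {
  private static final long MAX_BYTES = 0x7FFFFFFFL;
  private long capacity = 0;
  private final HashMap<Integer,byte[]> singleBuffers = new HashMap<Integer,byte[]>();

  byte[] newBuffer(int size) {
    capacity += size;
    if (capacity <= MAX_BYTES) {
      // below the limit we reuse buffers; get() is cast-free on a typed map
      byte[] buf = singleBuffers.get(Integer.valueOf(size));
      if (buf == null) {
        buf = new byte[size];
        singleBuffers.put(Integer.valueOf(size), buf);
      }
      return buf;
    }
    return new byte[size];
  }
}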

View File

@ -21,7 +21,6 @@ import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@ -63,8 +62,7 @@ public class TestLockFactory extends LuceneTestCase {
assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
lf.makeLockCount >= 1);
for(Iterator e = lf.locksCreated.keySet().iterator(); e.hasNext();) {
String lockName = (String) e.next();
for(final String lockName : lf.locksCreated.keySet()) {
MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
lock.lockAttempts > 0);
@ -341,7 +339,7 @@ public class TestLockFactory extends LuceneTestCase {
public class MockLockFactory extends LockFactory {
public boolean lockPrefixSet;
public Map locksCreated = Collections.synchronizedMap(new HashMap());
public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
public int makeLockCount = 0;
@Override
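
A small sketch of the typed, synchronized map and the for-each loop used above (LockStats is a hypothetical value type standing in for MockLockFactory.MockLock):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Illustrative stand-in: only the typed synchronizedMap and the keySet()
// loop mirror the actual change in TestLockFactory.
class LockBookkeeping {
  static class LockStats { int lockAttempts; }

  final Map<String,LockStats> locksCreated =
      Collections.synchronizedMap(new HashMap<String,LockStats>());

  int totalLockAttempts() {
    int total = 0;
    synchronized (locksCreated) {     // required when iterating a synchronizedMap
      for (final String lockName : locksCreated.keySet()) {
        total += locksCreated.get(lockName).lockAttempts;
      }
    }
    return total;
  }
}

Iterating entrySet() instead of keySet() would avoid the second lookup, but the hunk keeps the keySet() shape of the original loop.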

View File

@ -43,7 +43,7 @@ public abstract class LocalizedTestCase extends LuceneTestCase {
/**
* An optional limited set of testcases that will run under different Locales.
*/
private final Set testWithDifferentLocales;
private final Set<String> testWithDifferentLocales;
public LocalizedTestCase() {
super();
@ -55,12 +55,12 @@ public abstract class LocalizedTestCase extends LuceneTestCase {
testWithDifferentLocales = null;
}
public LocalizedTestCase(Set testWithDifferentLocales) {
public LocalizedTestCase(Set<String> testWithDifferentLocales) {
super();
this.testWithDifferentLocales = testWithDifferentLocales;
}
public LocalizedTestCase(String name, Set testWithDifferentLocales) {
public LocalizedTestCase(String name, Set<String> testWithDifferentLocales) {
super(name);
this.testWithDifferentLocales = testWithDifferentLocales;
}
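
A hypothetical subclass showing how the typed constructors above would be called (the test method name is invented; only the Set<String> argument reflects the change):

import java.util.Arrays;
import java.util.HashSet;

import org.apache.lucene.util.LocalizedTestCase;

// Hypothetical example; not an actual Lucene test class.
public class TestSomethingLocalized extends LocalizedTestCase {
  public TestSomethingLocalized() {
    super(new HashSet<String>(Arrays.asList("testDateParsing")));
  }

  public void testDateParsing() {
    // a locale-sensitive assertion would go here
  }
}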

View File

@ -151,7 +151,7 @@ public abstract class LuceneTestCase extends TestCase {
* @param iter Each next() is toString()ed and logged on its own line. If iter is null this is logged differently than an empty iterator.
* @param stream Stream to log messages to.
*/
public static void dumpIterator(String label, Iterator iter,
public static <T> void dumpIterator(String label, Iterator<T> iter,
PrintStream stream) {
stream.println("*** BEGIN "+label+" ***");
if (null == iter) {
@ -170,7 +170,7 @@ public abstract class LuceneTestCase extends TestCase {
*/
public static void dumpArray(String label, Object[] objs,
PrintStream stream) {
Iterator iter = (null == objs) ? null : Arrays.asList(objs).iterator();
Iterator<Object> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
dumpIterator(label, iter, stream);
}
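
A minimal sketch of the generified helpers above, assuming the null-vs-empty handling described by the javadoc (the exact log messages here are made up, and DumpUtil is not a real Lucene class):

import java.io.PrintStream;
import java.util.Arrays;
import java.util.Iterator;

final class DumpUtil {
  static <T> void dumpIterator(String label, Iterator<T> iter, PrintStream stream) {
    stream.println("*** BEGIN " + label + " ***");
    if (null == iter) {
      stream.println(" ... NULL ...");           // null iterator logged differently
    } else {
      while (iter.hasNext()) {
        stream.println(String.valueOf(iter.next()));
      }
    }
    stream.println("*** END " + label + " ***");
  }

  static void dumpArray(String label, Object[] objs, PrintStream stream) {
    // Arrays.asList(Object[]) yields List<Object>, so the iterator is typed
    Iterator<Object> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
    dumpIterator(label, iter, stream);
  }
}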

View File

@ -29,7 +29,7 @@ public class TestCloseableThreadLocal extends LuceneTestCase {
public void testNullValue() throws Exception {
// Tests that null can be set as a valid value (LUCENE-1805). This
// previously failed in get().
CloseableThreadLocal ctl = new CloseableThreadLocal();
CloseableThreadLocal<Object> ctl = new CloseableThreadLocal<Object>();
ctl.set(null);
assertNull(ctl.get());
}
@ -37,12 +37,11 @@ public class TestCloseableThreadLocal extends LuceneTestCase {
public void testDefaultValueWithoutSetting() throws Exception {
// LUCENE-1805: make sure default get returns null,
// twice in a row
CloseableThreadLocal ctl = new CloseableThreadLocal();
assertNull(ctl.get());
CloseableThreadLocal<Object> ctl = new CloseableThreadLocal<Object>();
assertNull(ctl.get());
}
public class InitValueThreadLocal extends CloseableThreadLocal {
public class InitValueThreadLocal extends CloseableThreadLocal<Object> {
@Override
protected Object initialValue() {
return TEST_VALUE;
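
A compact sketch of the typed CloseableThreadLocal usage in this test (TEST_VALUE is a stand-in for the test's constant; the close() call is just cleanup in this example):

import org.apache.lucene.util.CloseableThreadLocal;

// Illustrative only; mirrors the null-value and initialValue() cases above.
class ThreadLocalSketch {
  static final Object TEST_VALUE = new Object();

  static class InitValueThreadLocal extends CloseableThreadLocal<Object> {
    @Override
    protected Object initialValue() {
      return TEST_VALUE;            // typed: callers need no cast on get()
    }
  }

  static void demo() {
    CloseableThreadLocal<Object> ctl = new CloseableThreadLocal<Object>();
    ctl.set(null);                  // LUCENE-1805: null is a legal value
    assert ctl.get() == null;
    ctl.close();
  }
}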

View File

@ -17,9 +17,6 @@ package org.apache.lucene.util;
* limitations under the License.
*/
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OpenBitSet;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
@ -174,7 +171,7 @@ public class TestNumericUtils extends LuceneTestCase {
/** Note: The neededBounds iterator must be unsigned (easier understanding what's happening) */
protected void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
final boolean useBitSet, final Iterator neededBounds
final boolean useBitSet, final Iterator<Long> neededBounds
) throws Exception {
final OpenBitSet bits=useBitSet ? new OpenBitSet(upper-lower+1) : null;
@ -189,8 +186,8 @@ public class TestNumericUtils extends LuceneTestCase {
min ^= 0x8000000000000000L;
max ^= 0x8000000000000000L;
//System.out.println("Long.valueOf(0x"+Long.toHexString(min>>>shift)+"L),Long.valueOf(0x"+Long.toHexString(max>>>shift)+"L),");
assertEquals( "inner min bound", ((Long)neededBounds.next()).longValue(), min>>>shift);
assertEquals( "inner max bound", ((Long)neededBounds.next()).longValue(), max>>>shift);
assertEquals( "inner min bound", neededBounds.next().longValue(), min>>>shift);
assertEquals( "inner max bound", neededBounds.next().longValue(), max>>>shift);
}
}, precisionStep, lower, upper);
@ -246,7 +243,7 @@ public class TestNumericUtils extends LuceneTestCase {
}).iterator());
// an inverse range should produce no sub-ranges
assertLongRangeSplit(9500L, -5000L, 4, false, Collections.EMPTY_LIST.iterator());
assertLongRangeSplit(9500L, -5000L, 4, false, Collections. <Long> emptyList().iterator());
// a 0-length range should reproduce the range itself
assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(new Long[]{
@ -256,7 +253,7 @@ public class TestNumericUtils extends LuceneTestCase {
/** Note: The neededBounds iterator must be unsigned (easier understanding what's happening) */
protected void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
final boolean useBitSet, final Iterator neededBounds
final boolean useBitSet, final Iterator<Integer> neededBounds
) throws Exception {
final OpenBitSet bits=useBitSet ? new OpenBitSet(upper-lower+1) : null;
@ -271,8 +268,8 @@ public class TestNumericUtils extends LuceneTestCase {
min ^= 0x80000000;
max ^= 0x80000000;
//System.out.println("Integer.valueOf(0x"+Integer.toHexString(min>>>shift)+"),Integer.valueOf(0x"+Integer.toHexString(max>>>shift)+"),");
assertEquals( "inner min bound", ((Integer)neededBounds.next()).intValue(), min>>>shift);
assertEquals( "inner max bound", ((Integer)neededBounds.next()).intValue(), max>>>shift);
assertEquals( "inner min bound", neededBounds.next().intValue(), min>>>shift);
assertEquals( "inner max bound", neededBounds.next().intValue(), max>>>shift);
}
}, precisionStep, lower, upper);
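
A hedged sketch of the typed-iterator pattern running through these hunks: the expected bounds arrive as Iterator<Long> (or Iterator<Integer>), so next() needs no cast, and "no sub-ranges expected" is written as Collections.<Long>emptyList(). Class and method names below are illustrative only:

import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;

class BoundsCheck {
  static void assertBounds(Iterator<Long> neededBounds, long min, long max) {
    // the real test compares unsigned-shifted values; here we only show the
    // cast-free consumption of the typed iterator
    long expectedMin = neededBounds.next().longValue();
    long expectedMax = neededBounds.next().longValue();
    if (expectedMin != min || expectedMax != max) {
      throw new AssertionError("bounds mismatch");
    }
  }

  public static void main(String[] args) {
    assertBounds(Arrays.asList(Long.valueOf(1L), Long.valueOf(2L)).iterator(), 1L, 2L);
    Iterator<Long> none = Collections.<Long>emptyList().iterator(); // no sub-ranges expected
    assert !none.hasNext();
  }
}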
@ -328,7 +325,7 @@ public class TestNumericUtils extends LuceneTestCase {
}).iterator());
// an inverse range should produce no sub-ranges
assertIntRangeSplit(9500, -5000, 4, false, Collections.EMPTY_LIST.iterator());
assertIntRangeSplit(9500, -5000, 4, false, Collections. <Integer> emptyList().iterator());
// a 0-length range should reproduce the range itself
assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(new Integer[]{

View File

@ -21,7 +21,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class BaseTestLRU extends LuceneTestCase {
protected void testCache(Cache cache, int n) throws Exception {
protected void testCache(Cache<Integer,Object> cache, int n) throws Exception {
Object dummy = new Object();
for (int i = 0; i < n; i++) {

View File

@ -21,7 +21,7 @@ public class TestDoubleBarrelLRUCache extends BaseTestLRU {
public void testLRUCache() throws Exception {
final int n = 100;
testCache(new DoubleBarrelLRUCache(n), n);
testCache(new DoubleBarrelLRUCache<Integer,Object>(n), n);
}
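
A rough sketch of what the generified cache buys the test, assuming the put/get methods the loop above exercises (the class and method names here are illustrative):

import org.apache.lucene.util.cache.Cache;
import org.apache.lucene.util.cache.DoubleBarrelLRUCache;

// Illustrative only; not part of TestDoubleBarrelLRUCache.
class LruSketch {
  static void fillAndRead(int n) {
    Cache<Integer,Object> cache = new DoubleBarrelLRUCache<Integer,Object>(n);
    Object dummy = new Object();
    for (int i = 0; i < n; i++) {
      cache.put(Integer.valueOf(i), dummy);     // typed keys: no raw-type warnings
    }
    Object hit = cache.get(Integer.valueOf(n - 1));
    assert hit == dummy;                         // recently added entries stay cached
  }
}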
private class CacheThread extends Thread {