LUCENE-1944: Cleanup contrib to not use deprecated APIs

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@821444 13f79535-47bb-0310-9956-ffa450edef68
Author: Uwe Schindler
Date:   2009-10-03 23:24:33 +00:00
Parent: b75e96f2f4
Commit: 236baf9fcb

63 changed files with 309 additions and 312 deletions
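The whole commit applies one recurring substitution: overloads deprecated in Lucene 2.9 — path-String constructors and Directory-only open() calls that default to writable readers — are replaced by Directory-based calls with an explicit readOnly flag. A minimal before/after sketch of that pattern (the args[0] index path is a placeholder, not from this commit):

import java.io.File;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

public class ReadOnlyOpenSketch {
  public static void main(String[] args) throws Exception {
    // Before (deprecated): IndexReader.open("/path/to/index") and
    // new IndexSearcher("/path/to/index") took path strings and opened
    // writable readers by default.
    // After: open a Directory once and say readOnly=true explicitly.
    FSDirectory dir = FSDirectory.open(new File(args[0]));
    IndexReader reader = IndexReader.open(dir, true);      // read-only reader
    IndexSearcher searcher = new IndexSearcher(dir, true); // read-only searcher
    try {
      System.out.println("numDocs=" + reader.numDocs());
    } finally {
      searcher.close();
      reader.close();
      dir.close();
    }
  }
}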


@@ -62,7 +62,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
       writer.addDocument(doc);
     }
     writer.close();
-    reader = IndexReader.open(dir);
+    reader = IndexReader.open(dir, true);
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(appAnalyzer);
   }


@@ -79,7 +79,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
     writer.close();
-    return new IndexSearcher(dir);
+    return new IndexSearcher(dir, true);
   }

   protected Hits queryParsingTest(Analyzer analyzer, String qs) throws Exception {


@@ -27,6 +27,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Searcher;
@@ -256,133 +257,138 @@ public class IndexTask extends Task {
       create = true;
     }
-    Searcher searcher = null;
-    boolean checkLastModified = false;
-    if (!create) {
-      try {
-        searcher = new IndexSearcher(indexDir.getAbsolutePath());
-        checkLastModified = true;
-      } catch (IOException ioe) {
-        log("IOException: " + ioe.getMessage());
-        // Empty - ignore, which indicates to index all
-        // documents
-      }
-    }
-    log("checkLastModified = " + checkLastModified, Project.MSG_VERBOSE);
-    IndexWriter writer =
-      new IndexWriter(indexDir, analyzer, create, IndexWriter.MaxFieldLength.LIMITED);
-    writer.setUseCompoundFile(useCompoundIndex);
-    int totalFiles = 0;
-    int totalIndexed = 0;
-    int totalIgnored = 0;
-    try {
-      writer.setMergeFactor(mergeFactor);
-      for (int i = 0; i < rcs.size(); i++) {
-        ResourceCollection rc = (ResourceCollection) rcs.elementAt(i);
-        if (rc.isFilesystemOnly()) {
-          Iterator resources = rc.iterator();
-          while (resources.hasNext()) {
-            Resource r = (Resource) resources.next();
-            if (!r.isExists() || !(r instanceof FileResource)) {
-              continue;
-            }
-            totalFiles++;
-            File file = ((FileResource) r).getFile();
-            if (!file.exists() || !file.canRead()) {
-              throw new BuildException("File \"" +
-                                       file.getAbsolutePath()
-                                       + "\" does not exist or is not readable.");
-            }
-            boolean indexIt = true;
-            if (checkLastModified) {
-              Term pathTerm =
-                new Term("path", file.getPath());
-              TermQuery query =
-                new TermQuery(pathTerm);
-              Hits hits = searcher.search(query);
-              // if document is found, compare the
-              // indexed last modified time with the
-              // current file
-              // - don't index if up to date
-              if (hits.length() > 0) {
-                Document doc = hits.doc(0);
-                String indexModified =
-                  doc.get("modified").trim();
-                if (indexModified != null) {
-                  long lastModified = 0;
-                  try {
-                    lastModified = DateTools.stringToTime(indexModified);
-                  } catch (ParseException e) {
-                    // if modified time is not parsable, skip
-                  }
-                  if (lastModified == file.lastModified()) {
-                    // TODO: remove existing document
-                    indexIt = false;
-                  }
-                }
-              }
-            }
-            if (indexIt) {
-              try {
-                log("Indexing " + file.getPath(),
-                    Project.MSG_VERBOSE);
-                Document doc =
-                  handler.getDocument(file);
-                if (doc == null) {
-                  totalIgnored++;
-                } else {
-                  // Add the path of the file as a field named "path". Use a Keyword field, so
-                  // that the index stores the path, and so that the path is searchable
-                  doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
-                  // Add the last modified date of the file a field named "modified". Use a
-                  // Keyword field, so that it's searchable, but so that no attempt is made
-                  // to tokenize the field into words.
-                  doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
-                  writer.addDocument(doc);
-                  totalIndexed++;
-                }
-              } catch (DocumentHandlerException e) {
-                throw new BuildException(e);
-              }
-            }
-          }
-          // for j
-        }
-        // if (fs != null)
-      }
-      // for i
-      writer.optimize();
-    }
-    //try
-    finally {
-      // always make sure everything gets closed,
-      // no matter how we exit.
-      writer.close();
-      if (searcher != null) {
-        searcher.close();
-      }
-    }
-    Date end = new Date();
-    log(totalIndexed + " out of " + totalFiles + " indexed (" +
-        totalIgnored + " ignored) in " + (end.getTime() - start.getTime()) +
-        " milliseconds");
+    FSDirectory dir = FSDirectory.open(indexDir);
+    try {
+      Searcher searcher = null;
+      boolean checkLastModified = false;
+      if (!create) {
+        try {
+          searcher = new IndexSearcher(dir, true);
+          checkLastModified = true;
+        } catch (IOException ioe) {
+          log("IOException: " + ioe.getMessage());
+          // Empty - ignore, which indicates to index all
+          // documents
+        }
+      }
+      log("checkLastModified = " + checkLastModified, Project.MSG_VERBOSE);
+      IndexWriter writer =
+        new IndexWriter(dir, analyzer, create, IndexWriter.MaxFieldLength.LIMITED);
+      writer.setUseCompoundFile(useCompoundIndex);
+      int totalFiles = 0;
+      int totalIndexed = 0;
+      int totalIgnored = 0;
+      try {
+        writer.setMergeFactor(mergeFactor);
+        for (int i = 0; i < rcs.size(); i++) {
+          ResourceCollection rc = (ResourceCollection) rcs.elementAt(i);
+          if (rc.isFilesystemOnly()) {
+            Iterator resources = rc.iterator();
+            while (resources.hasNext()) {
+              Resource r = (Resource) resources.next();
+              if (!r.isExists() || !(r instanceof FileResource)) {
+                continue;
+              }
+              totalFiles++;
+              File file = ((FileResource) r).getFile();
+              if (!file.exists() || !file.canRead()) {
+                throw new BuildException("File \"" +
+                                         file.getAbsolutePath()
+                                         + "\" does not exist or is not readable.");
+              }
+              boolean indexIt = true;
+              if (checkLastModified) {
+                Term pathTerm =
+                  new Term("path", file.getPath());
+                TermQuery query =
+                  new TermQuery(pathTerm);
+                Hits hits = searcher.search(query);
+                // if document is found, compare the
+                // indexed last modified time with the
+                // current file
+                // - don't index if up to date
+                if (hits.length() > 0) {
+                  Document doc = hits.doc(0);
+                  String indexModified =
+                    doc.get("modified").trim();
+                  if (indexModified != null) {
+                    long lastModified = 0;
+                    try {
+                      lastModified = DateTools.stringToTime(indexModified);
+                    } catch (ParseException e) {
+                      // if modified time is not parsable, skip
+                    }
+                    if (lastModified == file.lastModified()) {
+                      // TODO: remove existing document
+                      indexIt = false;
+                    }
+                  }
+                }
+              }
+              if (indexIt) {
+                try {
+                  log("Indexing " + file.getPath(),
+                      Project.MSG_VERBOSE);
+                  Document doc =
+                    handler.getDocument(file);
+                  if (doc == null) {
+                    totalIgnored++;
+                  } else {
+                    // Add the path of the file as a field named "path". Use a Keyword field, so
+                    // that the index stores the path, and so that the path is searchable
+                    doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
+                    // Add the last modified date of the file a field named "modified". Use a
+                    // Keyword field, so that it's searchable, but so that no attempt is made
+                    // to tokenize the field into words.
+                    doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
+                    writer.addDocument(doc);
+                    totalIndexed++;
+                  }
+                } catch (DocumentHandlerException e) {
+                  throw new BuildException(e);
+                }
+              }
+            }
+            // for j
+          }
+          // if (fs != null)
+        }
+        // for i
+        writer.optimize();
+      }
+      //try
+      finally {
+        // always make sure everything gets closed,
+        // no matter how we exit.
+        writer.close();
+        if (searcher != null) {
+          searcher.close();
+        }
+      }
+      Date end = new Date();
+      log(totalIndexed + " out of " + totalFiles + " indexed (" +
+          totalIgnored + " ignored) in " + (end.getTime() - start.getTime()) +
+          " milliseconds");
+    } finally {
+      dir.close();
+    }
   }

   public static class HandlerConfig implements DynamicConfigurator {
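Beyond the readOnly flag, the rewrite above also changes resource ownership: execute() now opens the FSDirectory itself and releases it in an outer finally, instead of handing a path to IndexWriter/IndexSearcher and letting them manage an implicitly opened directory. A stripped-down sketch of that shape, with a hypothetical args[0] index path and SimpleAnalyzer standing in for the task's configured analyzer:

import java.io.File;

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;

public class DirectoryLifecycleSketch {
  public static void main(String[] args) throws Exception {
    FSDirectory dir = FSDirectory.open(new File(args[0]));
    try {
      IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(),
          true, IndexWriter.MaxFieldLength.LIMITED);
      try {
        // ... add documents here ...
      } finally {
        writer.close(); // close the writer before the directory
      }
    } finally {
      dir.close(); // whoever opened the Directory closes it
    }
  }
}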


@@ -30,6 +30,7 @@ import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.tools.ant.Project;
 import org.apache.tools.ant.types.FileSet;

@@ -43,10 +44,11 @@ public class IndexTaskTest extends TestCase {
     "org.apache.lucene.ant.FileExtensionDocumentHandler";

   private String docsDir = System.getProperty("docs.dir");
-  private String indexDir = System.getProperty("index.dir");
+  private File indexDir = new File(System.getProperty("index.dir"));
   private Searcher searcher;
   private Analyzer analyzer;
+  private FSDirectory dir;

   /**

@@ -64,11 +66,12 @@ public class IndexTaskTest extends TestCase {
     task.addFileset(fs);
     task.setOverwrite(true);
     task.setDocumentHandler(docHandler);
-    task.setIndex(new File(indexDir));
+    task.setIndex(indexDir);
     task.setProject(project);
     task.execute();
-    searcher = new IndexSearcher(indexDir);
+    dir = FSDirectory.open(indexDir);
+    searcher = new IndexSearcher(dir, true);
     analyzer = new StopAnalyzer();
   }

@@ -87,6 +90,7 @@ public class IndexTaskTest extends TestCase {
    */
   public void tearDown() throws IOException {
     searcher.close();
+    dir.close();
   }
 }


@@ -43,7 +43,7 @@ public class PrintReaderTask extends PerfTask {
     Config config = getRunData().getConfig();
     IndexReader r = null;
     if (userData == null)
-      r = IndexReader.open(dir);
+      r = IndexReader.open(dir, true);
     else
       r = OpenReaderTask.openCommitPoint(userData, dir, config, true);
     System.out.println("--> numDocs:"+r.numDocs()+" dels:"+r.numDeletedDocs());


@@ -67,7 +67,7 @@ public abstract class ReadTask extends PerfTask {
     IndexReader ir = getRunData().getIndexReader();
     if (ir == null) {
       Directory dir = getRunData().getDirectory();
-      ir = IndexReader.open(dir);
+      ir = IndexReader.open(dir, true);
       closeReader = true;
       //res++; //this is confusing, comment it out
     }


@@ -57,16 +57,14 @@ public class Algorithm {
     currSequence.setDepth(0);
     String taskPackage = PerfTask.class.getPackage().getName() + ".";
-    Class paramClass[] = {PerfRunData.class};
-    PerfRunData paramObj[] = {runData};

     while (stok.nextToken() != StreamTokenizer.TT_EOF) {
       switch(stok.ttype) {

         case StreamTokenizer.TT_WORD:
           String s = stok.sval;
-          Constructor cnstr = Class.forName(taskPackage+s+"Task").getConstructor(paramClass);
-          PerfTask task = (PerfTask) cnstr.newInstance(paramObj);
+          Constructor<? extends PerfTask> cnstr = Class.forName(taskPackage+s+"Task")
+            .asSubclass(PerfTask.class).getConstructor(PerfRunData.class);
+          PerfTask task = cnstr.newInstance(runData);
           task.setDisableCounting(isDisableCountNextTask);
           isDisableCountNextTask = false;
           currSequence.addTask(task);
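The Algorithm change trades raw reflection plus an unchecked (PerfTask) cast for Class.asSubclass(), which narrows the Class object up front and fails fast if the named class is not actually a PerfTask. A self-contained sketch of the same idiom, using hypothetical stand-in classes rather than the benchmark package:

import java.lang.reflect.Constructor;

public class AsSubclassSketch {
  // Hypothetical stand-ins for PerfRunData / PerfTask.
  public static class RunData {}
  public static abstract class Task {
    protected Task(RunData runData) {}
  }
  public static class ReportTask extends Task {
    public ReportTask(RunData runData) { super(runData); }
  }

  static Task create(String className, RunData runData) throws Exception {
    // asSubclass throws ClassCastException immediately if the class does
    // not extend Task; no raw types, no (Task) cast at the call site.
    Constructor<? extends Task> cnstr =
        Class.forName(className).asSubclass(Task.class).getConstructor(RunData.class);
    return cnstr.newInstance(runData);
  }

  public static void main(String[] args) throws Exception {
    Task t = create("AsSubclassSketch$ReportTask", new RunData());
    System.out.println(t.getClass().getSimpleName()); // ReportTask
  }
}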


@@ -24,6 +24,7 @@ import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
 import org.apache.lucene.benchmark.quality.*;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.FSDirectory;

 import java.io.BufferedReader;
 import java.io.File;

@@ -41,7 +42,8 @@ public class QueryDriver {
     File topicsFile = new File(args[0]);
     File qrelsFile = new File(args[1]);
-    Searcher searcher = new IndexSearcher(args[3]);
+    FSDirectory dir = FSDirectory.open(new File(args[3]));
+    Searcher searcher = new IndexSearcher(dir, true);
     int maxResults = 1000;
     String docNameField = "docname";


@@ -85,7 +85,7 @@ public class QualityQueriesFinder {
   private String [] bestTerms(String field,int numTerms) throws IOException {
     PriorityQueue pq = new TermsDfQueue(numTerms);
-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, true);
     try {
       int threshold = ir.maxDoc() / 10; // ignore words too common.
       TermEnum terms = ir.terms(new Term(field,""));


@@ -94,7 +94,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false, IndexWriter.MaxFieldLength.LIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
     ir.close();
   }

@@ -153,7 +153,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false, IndexWriter.MaxFieldLength.LIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
     ir.close();
   }

@@ -191,7 +191,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
     ir.close();
   }

@@ -263,7 +263,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1 docs were added to the index, this is what we expect to find!",1,ir.numDocs());
     ir.close();
   }

@@ -292,7 +292,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();

@@ -354,7 +354,7 @@ public class TestPerfTasksLogic extends TestCase {
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals(numLines + " lines were created but " + ir.numDocs() + " docs are in the index", numLines, ir.numDocs());
     ir.close();

@@ -398,7 +398,7 @@ public class TestPerfTasksLogic extends TestCase {
     }

     // Separately count how many tokens are actually in the index:
-    IndexReader reader = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader reader = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals(NUM_DOCS, reader.numDocs());

     TermEnum terms = reader.terms();

@@ -442,7 +442,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 2 * 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();

@@ -524,7 +524,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();

@@ -560,7 +560,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();

@@ -604,7 +604,7 @@ public class TestPerfTasksLogic extends TestCase {
     benchmark.getRunData().getIndexWriter().close();

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();

@@ -649,7 +649,7 @@ public class TestPerfTasksLogic extends TestCase {
     benchmark.getRunData().getIndexWriter().close();

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();

@@ -692,7 +692,7 @@ public class TestPerfTasksLogic extends TestCase {
     assertFalse(writer.getUseCompoundFile());
     writer.close();
     Directory dir = benchmark.getRunData().getDirectory();
-    IndexReader reader = IndexReader.open(dir);
+    IndexReader reader = IndexReader.open(dir, true);
     TermFreqVector [] tfv = reader.getTermFreqVectors(0);
     assertNotNull(tfv);
     assertTrue(tfv.length > 0);

@@ -731,7 +731,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);

     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();


@@ -82,7 +82,7 @@ public class TestQualityRun extends TestCase {
     // validate topics & judgments match each other
     judge.validateData(qqs, logger);

-    IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(workDir,"index")));
+    IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(workDir,"index")), true);

     QualityQueryParser qqParser = new SimpleQQParser("title","body");
     QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);


@@ -82,7 +82,7 @@ public class CollationTestBase extends TestCase {
                       Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);

     AnalyzingQueryParser aqp = new AnalyzingQueryParser("content", analyzer);
     aqp.setLowercaseExpandedTerms(false);

@@ -127,7 +127,7 @@ public class CollationTestBase extends TestCase {
                       Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
     writer.close();
-    IndexSearcher searcher = new IndexSearcher(ramDir);
+    IndexSearcher searcher = new IndexSearcher(ramDir, true);
     Query query = new TermQuery(new Term("body","body"));

     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi

@@ -162,7 +162,7 @@ public class CollationTestBase extends TestCase {
                       Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
-    IndexSearcher searcher = new IndexSearcher(ramDir);
+    IndexSearcher searcher = new IndexSearcher(ramDir, true);

     Query query = new TermRangeQuery("content", firstBeg, firstEnd, true, true);
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;

@@ -189,7 +189,7 @@ public class CollationTestBase extends TestCase {
     writer.addDocument(doc);
     writer.close();

-    IndexReader reader = IndexReader.open(farsiIndex);
+    IndexReader reader = IndexReader.open(farsiIndex, true);
     IndexSearcher search = new IndexSearcher(reader);

     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi

@@ -268,7 +268,7 @@ public class CollationTestBase extends TestCase {
     }
     writer.optimize();
     writer.close();
-    Searcher searcher = new IndexSearcher(indexStore);
+    Searcher searcher = new IndexSearcher(indexStore, true);

     Sort sort = new Sort();
     Query queryX = new TermQuery(new Term ("contents", "x"));


@@ -140,7 +140,7 @@ public class JEDirectory extends Directory {
         throw new IOException("File does not exist: " + name);
     }

-    public String[] list() throws IOException {
+    public String[] listAll() throws IOException {
         Cursor cursor = null;
         List list = new ArrayList();

@@ -190,10 +190,6 @@ public class JEDirectory extends Directory {
         return new JELock();
     }

-    public void renameFile(String from, String to) throws IOException {
-        new File(from).rename(this, to);
-    }
-
     public void touchFile(String name) throws IOException {
         File file = new File(name);
         long length = 0L;


@@ -158,7 +158,7 @@ public class DbDirectory extends Directory {
         throw new IOException("File does not exist: " + name);
     }

-    public String[] list()
+    public String[] listAll()
         throws IOException
     {
         Dbc cursor = null;

@@ -216,12 +216,6 @@ public class DbDirectory extends Directory {
         return new DbLock();
     }

-    public void renameFile(String from, String to)
-        throws IOException
-    {
-        new File(from).rename(this, to);
-    }
-
     public void touchFile(String name)
         throws IOException
     {


@@ -62,7 +62,7 @@ public class FieldTermStack {
     writer.addDocument( doc );
     writer.close();

-    IndexReader reader = IndexReader.open( dir );
+    IndexReader reader = IndexReader.open( dir, true );
     FieldTermStack ftl = new FieldTermStack( reader, 0, "f", fieldQuery );
     reader.close();
   }


@@ -288,7 +288,7 @@ public abstract class AbstractTestCase extends TestCase {
     writer.addDocument( doc );
     writer.close();

-    reader = IndexReader.open( dir );
+    reader = IndexReader.open( dir, true );
   }

   protected void makeIndexShortMV() throws Exception {


@@ -125,6 +125,6 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase {
     writer.addDocument( doc );
     writer.close();

-    reader = IndexReader.open( dir );
+    reader = IndexReader.open( dir, true );
   }
 }


@@ -114,7 +114,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     Analyzer analyzer = new SimpleAnalyzer();
     QueryParser qp = new QueryParser(FIELD_NAME, analyzer);
     query = qp.parse("\"very long\"");
-    searcher = new IndexSearcher(ramDir, false);
+    searcher = new IndexSearcher(ramDir, true);
     TopDocs hits = searcher.search(query, 10);

     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);

@@ -564,7 +564,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     query = new ConstantScoreRangeQuery(FIELD_NAME, "kannedy", "kznnedy", true, true);

-    searcher = new IndexSearcher(ramDir);
+    searcher = new IndexSearcher(ramDir, true);
     // can't rewrite ConstantScoreRangeQuery if you want to highlight it -
     // it rewrites to ConstantScoreQuery which cannot be highlighted
     // query = unReWrittenQuery.rewrite(reader);

@@ -600,7 +600,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     query = new WildcardQuery(new Term(FIELD_NAME, "ken*"));
     ((WildcardQuery)query).setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);

-    searcher = new IndexSearcher(ramDir);
+    searcher = new IndexSearcher(ramDir, true);
     // can't rewrite ConstantScore if you want to highlight it -
     // it rewrites to ConstantScoreQuery which cannot be highlighted
     // query = unReWrittenQuery.rewrite(reader);

@@ -1098,7 +1098,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
       public void run() throws Exception {
         numHighlights = 0;
         // test to show how rewritten query can still be used
-        searcher = new IndexSearcher(ramDir);
+        searcher = new IndexSearcher(ramDir, true);
         Analyzer analyzer = new StandardAnalyzer();

         QueryParser parser = new QueryParser(FIELD_NAME, analyzer);

@@ -1218,7 +1218,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     writer1.addDocument(d);
     writer1.optimize();
     writer1.close();
-    IndexReader reader1 = IndexReader.open(ramDir1);
+    IndexReader reader1 = IndexReader.open(ramDir1, true);

     // setup index 2
     RAMDirectory ramDir2 = new RAMDirectory();

@@ -1229,11 +1229,11 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     writer2.addDocument(d);
     writer2.optimize();
     writer2.close();
-    IndexReader reader2 = IndexReader.open(ramDir2);
+    IndexReader reader2 = IndexReader.open(ramDir2, true);

     IndexSearcher searchers[] = new IndexSearcher[2];
-    searchers[0] = new IndexSearcher(ramDir1);
-    searchers[1] = new IndexSearcher(ramDir2);
+    searchers[0] = new IndexSearcher(ramDir1, true);
+    searchers[1] = new IndexSearcher(ramDir2, true);
     MultiSearcher multiSearcher = new MultiSearcher(searchers);
     QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
     parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);

@@ -1513,7 +1513,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     String q = "t_text1:random";
     QueryParser parser = new QueryParser( "t_text1", a );
     Query query = parser.parse( q );
-    IndexSearcher searcher = new IndexSearcher( dir );
+    IndexSearcher searcher = new IndexSearcher( dir, true );
     // This scorer can return negative idf -> null fragment
     Scorer scorer = new QueryTermScorer( query, searcher.getIndexReader(), "t_text1" );
     // This scorer doesn't use idf (patch version)

@@ -1539,7 +1539,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
    * writer = new IndexWriter(ramDir,bigramAnalyzer , true); Document d = new
    * Document(); Field f = new Field(FIELD_NAME, "java abc def", true, true,
    * true); d.add(f); writer.addDocument(d); writer.close(); IndexReader reader =
-   * IndexReader.open(ramDir);
+   * IndexReader.open(ramDir, true);
    *
    * IndexSearcher searcher=new IndexSearcher(reader); query =
    * QueryParser.parse("abc", FIELD_NAME, bigramAnalyzer);

@@ -1572,7 +1572,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }

   public void doSearching(Query unReWrittenQuery) throws Exception {
-    searcher = new IndexSearcher(ramDir);
+    searcher = new IndexSearcher(ramDir, true);
     // for any multi-term queries to work (prefix, wildcard, range,fuzzy etc)
     // you must use a rewritten query!
     query = unReWrittenQuery.rewrite(reader);

@@ -1609,7 +1609,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     writer.optimize();
     writer.close();

-    reader = IndexReader.open(ramDir);
+    reader = IndexReader.open(ramDir, true);
     numHighlights = 0;
   }


@@ -62,7 +62,7 @@ public class TestEmptyIndex extends TestCase {
     Directory d = new RAMDirectory();
     new IndexWriter(d, null, true, IndexWriter.MaxFieldLength.UNLIMITED).close();
-    r = IndexReader.open(d);
+    r = IndexReader.open(d, false);
     testNorms(r);
     r.close();
     d.close();

@@ -96,7 +96,7 @@ public class TestEmptyIndex extends TestCase {
     Directory d = new RAMDirectory();
     new IndexWriter(d, null, true, IndexWriter.MaxFieldLength.UNLIMITED).close();
-    r = IndexReader.open(d);
+    r = IndexReader.open(d, false);
     termEnumTest(r);
     r.close();
     d.close();


@@ -54,7 +54,7 @@ public class TestIndicesEquals extends TestCase {
 //  public void test2() throws Exception {
 //    FSDirectory fsdir = FSDirectory.open(new File("/tmp/fatcorpus"));
-//    IndexReader ir = IndexReader.open(fsdir);
+//    IndexReader ir = IndexReader.open(fsdir, false);
 //    InstantiatedIndex ii = new InstantiatedIndex(ir);
 //    ir.close();
 //    testEquals(fsdir, ii);

@@ -74,7 +74,7 @@ public class TestIndicesEquals extends TestCase {
     indexWriter.close();

     // test load ii from index reader
-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, false);
     InstantiatedIndex ii = new InstantiatedIndex(ir);
     ir.close();

@@ -116,7 +116,7 @@ public class TestIndicesEquals extends TestCase {
   private void testTermDocs(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {

-    IndexReader aprioriReader = IndexReader.open(aprioriIndex);
+    IndexReader aprioriReader = IndexReader.open(aprioriIndex, false);
     IndexReader testReader = testIndex.indexReaderFactory();

     TermEnum aprioriTermEnum = aprioriReader.terms(new Term("c", "danny"));

@@ -216,7 +216,7 @@ public class TestIndicesEquals extends TestCase {
     testEquals(aprioriIndex, testIndex);

     // delete a few documents
-    IndexReader ir = IndexReader.open(aprioriIndex);
+    IndexReader ir = IndexReader.open(aprioriIndex, false);
     ir.deleteDocument(3);
     ir.deleteDocument(8);
     ir.close();

@@ -232,7 +232,7 @@ public class TestIndicesEquals extends TestCase {
   protected void testEquals(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {

-    IndexReader aprioriReader = IndexReader.open(aprioriIndex);
+    IndexReader aprioriReader = IndexReader.open(aprioriIndex, false);
     IndexReader testReader = testIndex.indexReaderFactory();

     assertEquals(aprioriReader.numDocs(), testReader.numDocs());


@@ -42,7 +42,7 @@ public class TestSerialization extends TestCase {
     iw.addDocument(doc);
     iw.close();

-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, false);
     InstantiatedIndex ii = new InstantiatedIndex(ir);
     ir.close();


@@ -50,7 +50,7 @@ public class TestUnoptimizedReaderOnConstructor extends TestCase {
     addDocument(iw, "All work and no play makes wendy a dull girl");
     iw.close();

-    IndexReader unoptimizedReader = IndexReader.open(dir);
+    IndexReader unoptimizedReader = IndexReader.open(dir, false);
     unoptimizedReader.deleteDocument(2);

     InstantiatedIndex ii;


@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.Map.Entry;
+import java.io.File;

 import jline.ConsoleReader;

@@ -54,6 +55,7 @@ import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.FSDirectory;

 /**
  * Various methods that interact with Lucene and provide info about the

@@ -62,7 +64,7 @@ import org.apache.lucene.search.Searcher;
 class LuceneMethods {

   private int numDocs;
-  private String indexName; //directory of this index
+  private FSDirectory indexName; //directory of this index
   private java.util.Iterator fieldIterator;
   private List fields; //Fields as a vector
   private List indexedFields; //Fields as a vector

@@ -71,8 +73,8 @@ class LuceneMethods {
   private Query query; //current query string
   private String analyzerClassFQN = null; // Analyzer class, if NULL, use default Analyzer

-  public LuceneMethods(String index) {
-    indexName = index;
+  public LuceneMethods(String index) throws IOException {
+    indexName = FSDirectory.open(new File(index));
     message("Lucene CLI. Using directory '" + indexName + "'. Type 'help' for instructions.");
   }

@@ -94,7 +96,7 @@ class LuceneMethods {
   public void info() throws java.io.IOException {
-    IndexReader indexReader = IndexReader.open(indexName);
+    IndexReader indexReader = IndexReader.open(indexName, true);

     getFieldInfo();

@@ -103,7 +105,7 @@ class LuceneMethods {
     message("All Fields:" + fields.toString());
     message("Indexed Fields:" + indexedFields.toString());

-    if (IndexReader.isLocked(indexName)) {
+    if (IndexWriter.isLocked(indexName)) {
       message("Index is locked");
     }
     //IndexReader.getCurrentVersion(indexName);

@@ -180,7 +182,7 @@ class LuceneMethods {
   private Query explainQuery(String queryString) throws IOException, ParseException {

-    searcher = new IndexSearcher(indexName);
+    searcher = new IndexSearcher(indexName, true);
     Analyzer analyzer = createAnalyzer();
     getFieldInfo();

@@ -201,7 +203,7 @@ class LuceneMethods {
    */
   private Hits initSearch(String queryString) throws IOException, ParseException {

-    searcher = new IndexSearcher(indexName);
+    searcher = new IndexSearcher(indexName, true);
     Analyzer analyzer = createAnalyzer();
     getFieldInfo();

@@ -229,7 +231,7 @@ class LuceneMethods {
   }

   private void getFieldInfo() throws IOException {
-    IndexReader indexReader = IndexReader.open(indexName);
+    IndexReader indexReader = IndexReader.open(indexName, true);
     fields = new ArrayList();
     indexedFields = new ArrayList();

@@ -320,7 +322,7 @@ class LuceneMethods {
    */
   public void terms(String field) throws IOException {
     TreeMap termMap = new TreeMap();
-    IndexReader indexReader = IndexReader.open(indexName);
+    IndexReader indexReader = IndexReader.open(indexName, true);
     TermEnum terms = indexReader.terms();
     while (terms.next()) {
       Term term = terms.term();


@@ -715,7 +715,7 @@ public class MemoryIndex implements Serializable {
     private Searcher searcher; // needed to find searcher.getSimilarity()

     private MemoryIndexReader() {
-      super(null); // avoid as much superclass baggage as possible
+      super(); // avoid as much superclass baggage as possible
     }

     // lucene >= 1.9 or lucene-1.4.3 with patch removing "final" in superclass


@@ -420,7 +420,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     Searcher searcher = null;
     try {
       if (index instanceof Directory)
-        searcher = new IndexSearcher((Directory)index);
+        searcher = new IndexSearcher((Directory)index, true);
       else
         searcher = ((MemoryIndex) index).createSearcher();

@@ -450,7 +450,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     try {
       Directory dir = (Directory) index;
       int size = 0;
-      String[] fileNames = dir.list();
+      String[] fileNames = dir.listAll();
       for (int i=0; i < fileNames.length; i++) {
         size += dir.fileLength(fileNames[i]);
       }


@@ -112,7 +112,7 @@ public class FieldNormModifier {
     TermEnum termEnum = null;
     TermDocs termDocs = null;
     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, true);
       termCounts = new int[reader.maxDoc()];
       // if we are killing norms, get fake ones
       if (sim == null)

@@ -142,7 +142,7 @@ public class FieldNormModifier {
     }

     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, false);
       for (int d = 0; d < termCounts.length; d++) {
         if (! reader.isDeleted(d)) {
           if (sim == null)


@@ -19,8 +19,11 @@ package org.apache.lucene.misc;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.PriorityQueue;

+import java.io.File;
+
 /**
  * <code>HighFreqTerms</code> class extracts terms and their frequencies out
  * of an existing Lucene index.

@@ -34,11 +37,14 @@ public class HighFreqTerms {
   public static void main(String[] args) throws Exception {
     IndexReader reader = null;
+    FSDirectory dir = null;
     String field = null;
     if (args.length == 1) {
-      reader = IndexReader.open(args[0]);
+      dir = FSDirectory.open(new File(args[0]));
+      reader = IndexReader.open(dir, true);
     } else if (args.length == 2) {
-      reader = IndexReader.open(args[0]);
+      dir = FSDirectory.open(new File(args[0]));
+      reader = IndexReader.open(dir, true);
       field = args[1];
     } else {
       usage();


@@ -35,7 +35,7 @@ public class IndexMergeTool {
       System.err.println("Usage: IndexMergeTool <mergedIndex> <index1> <index2> [index3] ...");
       System.exit(1);
     }
-    File mergedIndex = new File(args[0]);
+    FSDirectory mergedIndex = FSDirectory.open(new File(args[0]));

     IndexWriter writer = new IndexWriter(mergedIndex, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);


@@ -113,7 +113,7 @@ public class LengthNormModifier {
     TermEnum termEnum = null;
     TermDocs termDocs = null;
     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, false);
       termCounts = new int[reader.maxDoc()];
       try {
         termEnum = reader.terms(new Term(field));

@@ -139,7 +139,7 @@ public class LengthNormModifier {
     }

     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, false);
       for (int d = 0; d < termCounts.length; d++) {
         if (! reader.isDeleted(d)) {
           byte norm = sim.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));


@@ -87,7 +87,7 @@ public class TestFieldNormModifier extends TestCase {
   public void testFieldWithNoNorm() throws Exception {

-    IndexReader r = IndexReader.open(store);
+    IndexReader r = IndexReader.open(store, false);
     byte[] norms = r.norms("nonorm");

     // sanity check, norms should all be 1

@@ -110,7 +110,7 @@ public class TestFieldNormModifier extends TestCase {
     }

     // nothing should have changed
-    r = IndexReader.open(store);
+    r = IndexReader.open(store, false);
     norms = r.norms("nonorm");
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));

@@ -128,7 +128,7 @@ public class TestFieldNormModifier extends TestCase {
   public void testGoodCases() throws Exception {

-    IndexSearcher searcher = new IndexSearcher(store);
+    IndexSearcher searcher = new IndexSearcher(store, true);
     final float[] scores = new float[NUM_DOCS];
     float lastScore = 0.0f;

@@ -164,7 +164,7 @@ public class TestFieldNormModifier extends TestCase {
     fnm.reSetNorms("field");

     // new norm (with default similarity) should put longer docs first
-    searcher = new IndexSearcher(store);
+    searcher = new IndexSearcher(store, true);
     searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
       private int docBase = 0;
       private Scorer scorer;

@@ -194,21 +194,21 @@ public class TestFieldNormModifier extends TestCase {
   public void testNormKiller() throws IOException {

-    IndexReader r = IndexReader.open(store);
+    IndexReader r = IndexReader.open(store, false);
     byte[] oldNorms = r.norms("untokfield");
     r.close();

     FieldNormModifier fnm = new FieldNormModifier(store, s);
     fnm.reSetNorms("untokfield");

-    r = IndexReader.open(store);
+    r = IndexReader.open(store, false);
     byte[] newNorms = r.norms("untokfield");
     r.close();
     assertFalse(Arrays.equals(oldNorms, newNorms));

     // verify that we still get documents in the same order as originally
-    IndexSearcher searcher = new IndexSearcher(store);
+    IndexSearcher searcher = new IndexSearcher(store, true);
     final float[] scores = new float[NUM_DOCS];
     float lastScore = 0.0f;


@@ -65,7 +65,7 @@ public class TestTermVectorAccessor extends TestCase {
     iw.close();

-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, false);

     TermVectorAccessor accessor = new TermVectorAccessor();


@@ -64,7 +64,7 @@ public class ChainedFilterTest extends TestCase {
     writer.close();

-    searcher = new IndexSearcher(directory);
+    searcher = new IndexSearcher(directory, true);

     // query for everything to make life easier
     BooleanQuery bq = new BooleanQuery();

@@ -222,7 +222,7 @@ public class ChainedFilterTest extends TestCase {
     IndexWriter writer = new IndexWriter(dir, analyzer, true, MaxFieldLength.LIMITED);
     writer.close();

-    Searcher searcher = new IndexSearcher(dir);
+    Searcher searcher = new IndexSearcher(dir, true);

     Query query = new TermQuery(new Term("none", "none"));


@@ -93,7 +93,7 @@ public class TestLengthNormModifier extends TestCase {
   public void testFieldWithNoNorm() throws Exception {

-    IndexReader r = IndexReader.open(store);
+    IndexReader r = IndexReader.open(store, false);
     byte[] norms = r.norms("nonorm");

     // sanity check, norms should all be 1

@@ -116,7 +116,7 @@ public class TestLengthNormModifier extends TestCase {
     }

     // nothing should have changed
-    r = IndexReader.open(store);
+    r = IndexReader.open(store, false);
     norms = r.norms("nonorm");
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));

@@ -140,7 +140,7 @@ public class TestLengthNormModifier extends TestCase {
     float lastScore = 0.0f;

     // default similarity should put docs with shorter length first
-    searcher = new IndexSearcher(store);
+    searcher = new IndexSearcher(store, false);
     searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
       private int docBase = 0;
       private Scorer scorer;

@@ -177,7 +177,7 @@ public class TestLengthNormModifier extends TestCase {
     fnm.reSetNorms("field");

     // new norm (with default similarity) should put longer docs first
-    searcher = new IndexSearcher(store);
+    searcher = new IndexSearcher(store, false);
     searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
       private int docBase = 0;
       private Scorer scorer;


@@ -122,7 +122,7 @@ public class TestComplexPhraseQuery extends TestCase {
       w.addDocument(doc);
     }
     w.close();
-    searcher = new IndexSearcher(rd);
+    searcher = new IndexSearcher(rd, true);
   }

   protected void tearDown() throws Exception {

View File

@@ -162,28 +162,6 @@ public class DuplicateFilter extends Filter
return bits; return bits;
} }
/**
* @param args
* @throws IOException
* @throws Exception
*/
public static void main(String[] args) throws Exception
{
IndexReader r=IndexReader.open("/indexes/personCentricAnon");
// IndexReader r=IndexReader.open("/indexes/enron");
long start=System.currentTimeMillis();
// DuplicateFilter df = new DuplicateFilter("threadId",KM_USE_FIRST_OCCURRENCE, PM_FAST_INVALIDATION);
// DuplicateFilter df = new DuplicateFilter("threadId",KM_USE_LAST_OCCURRENCE, PM_FAST_INVALIDATION);
DuplicateFilter df = new DuplicateFilter("vehicle.vrm",KM_USE_LAST_OCCURRENCE, PM_FAST_INVALIDATION);
// DuplicateFilter df = new DuplicateFilter("title",USE_LAST_OCCURRENCE);
// df.setProcessingMode(PM_SLOW_VALIDATION);
BitSet b = df.bits(r);
long end=System.currentTimeMillis()-start;
System.out.println(b.cardinality()+" in "+end+" ms ");
}
public String getFieldName() public String getFieldName()
{ {
return fieldName; return fieldName;
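
Note: the deleted main() leaned on two deprecated calls, IndexReader.open(String) and Filter.bits(IndexReader). If such a driver were still wanted, it could be rebuilt on FSDirectory.open and getDocIdSet — this sketch assumes the 2.9 Filter base class bridges getDocIdSet to an overridden bits(), and is not the project's code. The index path and field name mirror the removed lines:

    import java.io.File;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.DuplicateFilter;
    import org.apache.lucene.store.FSDirectory;

    public class DuplicateFilterDriver {
      public static void main(String[] args) throws Exception {
        FSDirectory dir = FSDirectory.open(new File("/indexes/personCentricAnon"));
        IndexReader r = IndexReader.open(dir, true);
        DuplicateFilter df = new DuplicateFilter("vehicle.vrm",
            DuplicateFilter.KM_USE_LAST_OCCURRENCE,
            DuplicateFilter.PM_FAST_INVALIDATION);
        long start = System.currentTimeMillis();
        DocIdSet set = df.getDocIdSet(r);  // non-deprecated replacement for bits(r)
        DocIdSetIterator it = set.iterator();
        int kept = 0;
        while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          kept++;
        }
        System.out.println(kept + " in " + (System.currentTimeMillis() - start) + " ms");
        r.close();
        dir.close();
      }
    }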

View File

@@ -32,6 +32,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.store.FSDirectory;
import java.util.Set; import java.util.Set;
import java.util.HashMap; import java.util.HashMap;
@@ -720,7 +721,8 @@ public final class MoreLikeThis {
} }
PrintStream o = System.out; PrintStream o = System.out;
IndexReader r = IndexReader.open(indexName); FSDirectory dir = FSDirectory.open(new File(indexName));
IndexReader r = IndexReader.open(dir, true);
o.println("Open index " + indexName + " which has " + r.numDocs() + " docs"); o.println("Open index " + indexName + " which has " + r.numDocs() + " docs");
MoreLikeThis mlt = new MoreLikeThis(r); MoreLikeThis mlt = new MoreLikeThis(r);
@@ -741,7 +743,7 @@ public final class MoreLikeThis {
o.println("q: " + query); o.println("q: " + query);
o.println(); o.println();
IndexSearcher searcher = new IndexSearcher(indexName); IndexSearcher searcher = new IndexSearcher(dir, true);
Hits hits = searcher.search(query); Hits hits = searcher.search(query);
int len = hits.length(); int len = hits.length();
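
Note: both MoreLikeThis hunks replace String-path overloads with an explicit Directory. FSDirectory.open picks a platform-appropriate FSDirectory implementation, and the reader and searcher share the one instance. Condensed (the path is illustrative):

    import java.io.File;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.FSDirectory;

    public class OpenByDirectory {
      public static void main(String[] args) throws Exception {
        FSDirectory dir = FSDirectory.open(new File("/path/to/index"));
        IndexReader r = IndexReader.open(dir, true);
        IndexSearcher searcher = new IndexSearcher(dir, true);
        System.out.println("Open index with " + r.numDocs() + " docs");
        searcher.close();
        r.close();
        dir.close();
      }
    }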

View File

@@ -54,7 +54,7 @@ public class BooleanFilterTest extends TestCase
addDoc(writer, "admin guest", "030", "20050101","N"); addDoc(writer, "admin guest", "030", "20050101","N");
writer.close(); writer.close();
reader=IndexReader.open(directory); reader=IndexReader.open(directory, true);
} }
private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException

View File

@@ -55,7 +55,7 @@ public class DuplicateFilterTest extends TestCase
addDoc(writer, "http://lucene.apache.org", "Oops. Lucene 2.1 out", "20050102"); addDoc(writer, "http://lucene.apache.org", "Oops. Lucene 2.1 out", "20050102");
writer.close(); writer.close();
reader=IndexReader.open(directory); reader=IndexReader.open(directory, true);
searcher =new IndexSearcher(reader); searcher =new IndexSearcher(reader);
} }

View File

@@ -51,7 +51,7 @@ public class FuzzyLikeThisQueryTest extends TestCase
addDoc(writer, "johnathon smythe","6"); addDoc(writer, "johnathon smythe","6");
writer.close(); writer.close();
searcher=new IndexSearcher(directory); searcher=new IndexSearcher(directory, true);
} }
private void addDoc(IndexWriter writer, String name, String id) throws IOException private void addDoc(IndexWriter writer, String name, String id) throws IOException

View File

@@ -64,7 +64,7 @@ public class TermsFilterTest extends TestCase
w.addDocument(doc); w.addDocument(doc);
} }
w.close(); w.close();
IndexReader reader = IndexReader.open(rd); IndexReader reader = IndexReader.open(rd, true);
TermsFilter tf=new TermsFilter(); TermsFilter tf=new TermsFilter();
tf.addTerm(new Term(fieldName,"19")); tf.addTerm(new Term(fieldName,"19"));

View File

@@ -335,7 +335,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
mfqp.setAnalyzer(analyzer); mfqp.setAnalyzer(analyzer);
mfqp.setDefaultOperator(Operator.AND); mfqp.setDefaultOperator(Operator.AND);
Query q = mfqp.parse("the footest", null); Query q = mfqp.parse("the footest", null);
IndexSearcher is = new IndexSearcher(ramDir); IndexSearcher is = new IndexSearcher(ramDir, true);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs; ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length); assertEquals(1, hits.length);
is.close(); is.close();

View File

@@ -333,7 +333,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
new String[] { "body" }, analyzer); new String[] { "body" }, analyzer);
mfqp.setDefaultOperator(QueryParserWrapper.Operator.AND); mfqp.setDefaultOperator(QueryParserWrapper.Operator.AND);
Query q = mfqp.parse("the footest"); Query q = mfqp.parse("the footest");
IndexSearcher is = new IndexSearcher(ramDir); IndexSearcher is = new IndexSearcher(ramDir, true);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs; ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length); assertEquals(1, hits.length);
is.close(); is.close();

View File

@@ -598,7 +598,7 @@ public class TestQPHelper extends LocalizedTestCase {
Field.Index.UN_TOKENIZED)); Field.Index.UN_TOKENIZED));
iw.addDocument(doc); iw.addDocument(doc);
iw.close(); iw.close();
IndexSearcher is = new IndexSearcher(ramDir); IndexSearcher is = new IndexSearcher(ramDir, true);
StandardQueryParser qp = new StandardQueryParser(); StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new WhitespaceAnalyzer()); qp.setAnalyzer(new WhitespaceAnalyzer());
@@ -1020,7 +1020,7 @@ public class TestQPHelper extends LocalizedTestCase {
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
iw.close(); iw.close();
IndexSearcher is = new IndexSearcher(ramDir); IndexSearcher is = new IndexSearcher(ramDir, true);
assertHits(1, "[12/1/2005 TO 12/3/2005]", is); assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
assertHits(2, "[12/1/2005 TO 12/4/2005]", is); assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
assertHits(1, "[12/3/2005 TO 12/4/2005]", is); assertHits(1, "[12/3/2005 TO 12/4/2005]", is);

View File

@@ -594,7 +594,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
Field.Index.UN_TOKENIZED)); Field.Index.UN_TOKENIZED));
iw.addDocument(doc); iw.addDocument(doc);
iw.close(); iw.close();
IndexSearcher is = new IndexSearcher(ramDir); IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParserWrapper qp = new QueryParserWrapper("content", QueryParserWrapper qp = new QueryParserWrapper("content",
new WhitespaceAnalyzer()); new WhitespaceAnalyzer());
@@ -1000,7 +1000,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
iw.close(); iw.close();
IndexSearcher is = new IndexSearcher(ramDir); IndexSearcher is = new IndexSearcher(ramDir, true);
assertHits(1, "[12/1/2005 TO 12/3/2005]", is); assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
assertHits(2, "[12/1/2005 TO 12/4/2005]", is); assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
assertHits(1, "[12/3/2005 TO 12/4/2005]", is); assertHits(1, "[12/3/2005 TO 12/4/2005]", is);

View File

@@ -45,7 +45,7 @@ public class TestRegexQuery extends TestCase {
writer.addDocument(doc); writer.addDocument(doc);
writer.optimize(); writer.optimize();
writer.close(); writer.close();
searcher = new IndexSearcher(directory); searcher = new IndexSearcher(directory, true);
} catch (Exception e) { } catch (Exception e) {
fail(e.toString()); fail(e.toString());
} }

View File

@@ -61,7 +61,7 @@ public class TestSpanRegexQuery extends TestCase {
writer.optimize(); writer.optimize();
writer.close(); writer.close();
IndexSearcher searcher = new IndexSearcher(directory); IndexSearcher searcher = new IndexSearcher(directory, true);
SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "aut.*")); SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "aut.*"));
SpanFirstQuery sfq = new SpanFirstQuery(srq, 1); SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
// SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6, // SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6,
@@ -80,8 +80,8 @@ public class TestSpanRegexQuery extends TestCase {
// 1. Search the same store which works // 1. Search the same store which works
IndexSearcher[] arrSearcher = new IndexSearcher[2]; IndexSearcher[] arrSearcher = new IndexSearcher[2];
arrSearcher[0] = new IndexSearcher(indexStoreA); arrSearcher[0] = new IndexSearcher(indexStoreA, true);
arrSearcher[1] = new IndexSearcher(indexStoreB); arrSearcher[1] = new IndexSearcher(indexStoreB, true);
MultiSearcher searcher = new MultiSearcher(arrSearcher); MultiSearcher searcher = new MultiSearcher(arrSearcher);
Hits hits = searcher.search(query); Hits hits = searcher.search(query);
arrSearcher[0].close(); arrSearcher[0].close();
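
Note: MultiSearcher composes per-index read-only searchers; the test above still uses the deprecated Hits, which search(Query, int) replaces. A sketch, assuming both directories already hold indexes and "field" is illustrative:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MultiSearcher;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;

    public class MultiSearcherSketch {
      static int countHits(Directory indexStoreA, Directory indexStoreB) throws Exception {
        IndexSearcher[] searchers = {
            new IndexSearcher(indexStoreA, true),
            new IndexSearcher(indexStoreB, true)
        };
        MultiSearcher multi = new MultiSearcher(searchers);
        // TopDocs-based search avoids the deprecated Hits API.
        TopDocs td = multi.search(new TermQuery(new Term("field", "aut")), 10);
        multi.close();  // also closes the wrapped sub-searchers
        return td.totalHits;
      }
    }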

View File

@@ -21,8 +21,10 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.FSDirectory;
import java.io.IOException; import java.io.IOException;
import java.io.File;
import java.rmi.Naming; import java.rmi.Naming;
import java.rmi.RMISecurityManager; import java.rmi.RMISecurityManager;
import java.rmi.RemoteException; import java.rmi.RemoteException;
@@ -116,7 +118,7 @@ public class RemoteSearchable
System.setSecurityManager(new RMISecurityManager()); System.setSecurityManager(new RMISecurityManager());
} }
Searchable local = new IndexSearcher(indexName); Searchable local = new IndexSearcher(FSDirectory.open(new File(indexName)), true);
RemoteSearchable impl = new RemoteSearchable(local); RemoteSearchable impl = new RemoteSearchable(local);
// bind the implementation to "Searchable" // bind the implementation to "Searchable"
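
Note: the same Directory migration applied to the RMI entry point. A trimmed sketch of publishing a read-only searcher (registry port and index path are illustrative):

    import java.io.File;
    import java.rmi.Naming;
    import java.rmi.registry.LocateRegistry;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.RemoteSearchable;
    import org.apache.lucene.search.Searchable;
    import org.apache.lucene.store.FSDirectory;

    public class PublishSearchable {
      public static void main(String[] args) throws Exception {
        LocateRegistry.createRegistry(1099);
        Searchable local =
            new IndexSearcher(FSDirectory.open(new File("/path/to/index")), true);
        RemoteSearchable impl = new RemoteSearchable(local);
        Naming.rebind("//localhost:1099/Searchable", impl);
      }
    }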

View File

@@ -78,7 +78,7 @@ public class TestRemoteCachingWrapperFilter extends LuceneTestCase {
// publish it // publish it
port = _TestUtil.getRandomSocketPort(); port = _TestUtil.getRandomSocketPort();
LocateRegistry.createRegistry(port); LocateRegistry.createRegistry(port);
Searchable local = new IndexSearcher(indexStore); Searchable local = new IndexSearcher(indexStore, true);
RemoteSearchable impl = new RemoteSearchable(local); RemoteSearchable impl = new RemoteSearchable(local);
Naming.rebind("//localhost:" + port + "/Searchable", impl); Naming.rebind("//localhost:" + port + "/Searchable", impl);
} }

View File

@@ -72,7 +72,7 @@ public class TestRemoteSearchable extends LuceneTestCase {
// publish it // publish it
port = _TestUtil.getRandomSocketPort(); port = _TestUtil.getRandomSocketPort();
LocateRegistry.createRegistry(port); LocateRegistry.createRegistry(port);
Searchable local = new IndexSearcher(indexStore); Searchable local = new IndexSearcher(indexStore, true);
RemoteSearchable impl = new RemoteSearchable(local); RemoteSearchable impl = new RemoteSearchable(local);
Naming.rebind("//localhost:" + port + "/Searchable", impl); Naming.rebind("//localhost:" + port + "/Searchable", impl);
} }

View File

@@ -237,6 +237,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
} }
// test custom search when remote // test custom search when remote
/* rewrite with new API
public void testRemoteCustomSort() throws Exception { public void testRemoteCustomSort() throws Exception {
Searchable searcher = getRemote(); Searchable searcher = getRemote();
MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher }); MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });
@@ -256,7 +257,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
assertSaneFieldCaches(getName() + " Comparator"); assertSaneFieldCaches(getName() + " Comparator");
FieldCache.DEFAULT.purgeAllCaches(); FieldCache.DEFAULT.purgeAllCaches();
} }*/
// test that the relevancy scores are the same even if // test that the relevancy scores are the same even if
// hits are sorted // hits are sorted
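
Note: the commented-out test depended on SortComparatorSource, which 2.9 deprecates in favor of FieldComparatorSource/FieldComparator; the "rewrite with new API" marker refers to that migration. One possible shape for such a rewrite — not the project's actual code; the int-valued field and ordering are illustrative:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.FieldCache;
    import org.apache.lucene.search.FieldComparator;
    import org.apache.lucene.search.FieldComparatorSource;

    public class IntComparatorSource extends FieldComparatorSource {
      public FieldComparator newComparator(final String fieldname, final int numHits,
          int sortPos, boolean reversed) throws IOException {
        return new FieldComparator() {
          private final int[] slotValues = new int[numHits];
          private int[] currentReaderValues;
          private int bottom;

          public void setNextReader(IndexReader reader, int docBase) throws IOException {
            currentReaderValues = FieldCache.DEFAULT.getInts(reader, fieldname);
          }
          // Subtraction assumes values far from Integer.MIN/MAX, so it cannot overflow.
          public int compare(int slot1, int slot2) {
            return slotValues[slot1] - slotValues[slot2];
          }
          public void setBottom(int slot) {
            bottom = slotValues[slot];
          }
          public int compareBottom(int doc) {
            return bottom - currentReaderValues[doc];
          }
          public void copy(int slot, int doc) {
            slotValues[slot] = currentReaderValues[doc];
          }
          public Comparable value(int slot) {
            return new Integer(slotValues[slot]);
          }
        };
      }
    }

A rewritten test would then sort with new Sort(new SortField("custom", new IntComparatorSource())).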

View File

@@ -164,7 +164,7 @@ public class TestCartesian extends TestCase{
} }
public void testAntiM() throws IOException, InvalidGeoException { public void testAntiM() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory); searcher = new IndexSearcher(directory, true);
final double miles = 2800.0; final double miles = 2800.0;
// Hawaii // Hawaii
@@ -252,7 +252,7 @@ public class TestCartesian extends TestCase{
} }
public void testPoleFlipping() throws IOException, InvalidGeoException { public void testPoleFlipping() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory); searcher = new IndexSearcher(directory, true);
final double miles = 3500.0; final double miles = 3500.0;
lat = 41.6032207; lat = 41.6032207;
@@ -340,7 +340,7 @@ public class TestCartesian extends TestCase{
} }
public void testRange() throws IOException, InvalidGeoException { public void testRange() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory); searcher = new IndexSearcher(directory, true);
final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0}; final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0};
final int[] expected = new int[] {7, 1, 0, 0}; final int[] expected = new int[] {7, 1, 0, 0};
@@ -431,7 +431,7 @@ public class TestCartesian extends TestCase{
public void testGeoHashRange() throws IOException, InvalidGeoException { public void testGeoHashRange() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory); searcher = new IndexSearcher(directory, true);
final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0}; final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0};
final int[] expected = new int[] {7, 1, 0, 0}; final int[] expected = new int[] {7, 1, 0, 0};

View File

@@ -113,7 +113,7 @@ public class SpellChecker {
if (searcher != null) { if (searcher != null) {
searcher.close(); searcher.close();
} }
searcher = new IndexSearcher(this.spellIndex); searcher = new IndexSearcher(this.spellIndex, true);
} }
public void setStringDistance(StringDistance sd) { public void setStringDistance(StringDistance sd) {
@@ -302,7 +302,7 @@ public class SpellChecker {
//close the old searcher //close the old searcher
searcher.close(); searcher.close();
searcher = new IndexSearcher(this.spellIndex); searcher = new IndexSearcher(this.spellIndex, true);
} }
/** /**
@@ -350,7 +350,7 @@ public class SpellChecker {
// also re-open the spell index to see our own changes when the next suggestion // also re-open the spell index to see our own changes when the next suggestion
// is fetched: // is fetched:
searcher.close(); searcher.close();
searcher = new IndexSearcher(this.spellIndex); searcher = new IndexSearcher(this.spellIndex, true);
} }
/** /**

View File

@@ -77,7 +77,7 @@ public class TestLuceneDictionary extends TestCase {
public void testFieldNonExistent() throws IOException { public void testFieldNonExistent() throws IOException {
try { try {
indexReader = IndexReader.open(store); indexReader = IndexReader.open(store, true);
ld = new LuceneDictionary(indexReader, "nonexistent_field"); ld = new LuceneDictionary(indexReader, "nonexistent_field");
it = ld.getWordsIterator(); it = ld.getWordsIterator();
@@ -91,7 +91,7 @@ public class TestLuceneDictionary extends TestCase {
public void testFieldAaa() throws IOException { public void testFieldAaa() throws IOException {
try { try {
indexReader = IndexReader.open(store); indexReader = IndexReader.open(store, true);
ld = new LuceneDictionary(indexReader, "aaa"); ld = new LuceneDictionary(indexReader, "aaa");
it = ld.getWordsIterator(); it = ld.getWordsIterator();
@@ -107,7 +107,7 @@ public class TestLuceneDictionary extends TestCase {
public void testFieldContents_1() throws IOException { public void testFieldContents_1() throws IOException {
try { try {
indexReader = IndexReader.open(store); indexReader = IndexReader.open(store, true);
ld = new LuceneDictionary(indexReader, "contents"); ld = new LuceneDictionary(indexReader, "contents");
it = ld.getWordsIterator(); it = ld.getWordsIterator();
@@ -137,7 +137,7 @@ public class TestLuceneDictionary extends TestCase {
public void testFieldContents_2() throws IOException { public void testFieldContents_2() throws IOException {
try { try {
indexReader = IndexReader.open(store); indexReader = IndexReader.open(store, true);
ld = new LuceneDictionary(indexReader, "contents"); ld = new LuceneDictionary(indexReader, "contents");
it = ld.getWordsIterator(); it = ld.getWordsIterator();
@@ -169,7 +169,7 @@ public class TestLuceneDictionary extends TestCase {
public void testFieldZzz() throws IOException { public void testFieldZzz() throws IOException {
try { try {
indexReader = IndexReader.open(store); indexReader = IndexReader.open(store, true);
ld = new LuceneDictionary(indexReader, "zzz"); ld = new LuceneDictionary(indexReader, "zzz");
it = ld.getWordsIterator(); it = ld.getWordsIterator();
@@ -186,7 +186,7 @@ public class TestLuceneDictionary extends TestCase {
public void testSpellchecker() throws IOException { public void testSpellchecker() throws IOException {
SpellChecker sc = new SpellChecker(new RAMDirectory()); SpellChecker sc = new SpellChecker(new RAMDirectory());
indexReader = IndexReader.open(store); indexReader = IndexReader.open(store, true);
sc.indexDictionary(new LuceneDictionary(indexReader, "contents")); sc.indexDictionary(new LuceneDictionary(indexReader, "contents"));
String[] suggestions = sc.suggestSimilar("Tam", 1); String[] suggestions = sc.suggestSimilar("Tam", 1);
assertEquals(1, suggestions.length); assertEquals(1, suggestions.length);

View File

@@ -63,7 +63,7 @@ public class TestSpellChecker extends TestCase {
public void testBuild() throws CorruptIndexException, IOException { public void testBuild() throws CorruptIndexException, IOException {
IndexReader r = IndexReader.open(userindex); IndexReader r = IndexReader.open(userindex, true);
spellChecker.clearIndex(); spellChecker.clearIndex();
@@ -192,7 +192,7 @@ public class TestSpellChecker extends TestCase {
} }
private int numdoc() throws IOException { private int numdoc() throws IOException {
IndexReader rs = IndexReader.open(spellindex); IndexReader rs = IndexReader.open(spellindex, true);
int num = rs.numDocs(); int num = rs.numDocs();
assertTrue(num != 0); assertTrue(num != 0);
//System.out.println("num docs: " + num); //System.out.println("num docs: " + num);
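
Note: the spellchecker tests now open their readers read-only, which is all a dictionary scan needs. The end-to-end pattern, assuming 'store' holds an index with a populated "contents" field:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.spell.LuceneDictionary;
    import org.apache.lucene.search.spell.SpellChecker;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class SpellingSketch {
      static String[] suggest(Directory store, String misspelled) throws Exception {
        SpellChecker sc = new SpellChecker(new RAMDirectory());
        IndexReader reader = IndexReader.open(store, true);
        try {
          sc.indexDictionary(new LuceneDictionary(reader, "contents"));
          return sc.suggestSimilar(misspelled, 5);
        } finally {
          reader.close();
        }
      }
    }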

View File

@@ -98,7 +98,7 @@ public class BooleanQueryTst {
/* if (verbose) System.out.println("Lucene: " + query.toString()); */ /* if (verbose) System.out.println("Lucene: " + query.toString()); */
TestCollector tc = new TestCollector(); TestCollector tc = new TestCollector();
Searcher searcher = new IndexSearcher(dBase.getDb()); Searcher searcher = new IndexSearcher(dBase.getDb(), true);
try { try {
searcher.search(query, tc); searcher.search(query, tc);
} finally { } finally {

View File

@@ -153,7 +153,7 @@ public class ListSearcher extends AbstractListModel {
this.searchString = searchString; this.searchString = searchString;
//make a new index searcher with the in memory (RAM) index. //make a new index searcher with the in memory (RAM) index.
IndexSearcher is = new IndexSearcher(directory); IndexSearcher is = new IndexSearcher(directory, true);
//make an array of fields - one for each column //make an array of fields - one for each column
String[] fields = {FIELD_NAME}; String[] fields = {FIELD_NAME};

View File

@@ -231,7 +231,7 @@ public class TableSearcher extends AbstractTableModel {
this.searchString = searchString; this.searchString = searchString;
//make a new index searcher with the in memory (RAM) index. //make a new index searcher with the in memory (RAM) index.
IndexSearcher is = new IndexSearcher(directory); IndexSearcher is = new IndexSearcher(directory, true);
//make an array of fields - one for each column //make an array of fields - one for each column
String[] fields = new String[tableModel.getColumnCount()]; String[] fields = new String[tableModel.getColumnCount()];

View File

@@ -70,7 +70,7 @@ public final class SynExpand {
} }
FSDirectory directory = FSDirectory.open(new File(args[0])); FSDirectory directory = FSDirectory.open(new File(args[0]));
IndexSearcher searcher = new IndexSearcher(directory); IndexSearcher searcher = new IndexSearcher(directory, true);
String query = args[1]; String query = args[1];
String field = "contents"; String field = "contents";

View File

@@ -53,7 +53,7 @@ public class SynLookup {
} }
FSDirectory directory = FSDirectory.open(new File(args[0])); FSDirectory directory = FSDirectory.open(new File(args[0]));
IndexSearcher searcher = new IndexSearcher(directory); IndexSearcher searcher = new IndexSearcher(directory, true);
String word = args[1]; String word = args[1];
Hits hits = searcher.search( Hits hits = searcher.search(

View File

@@ -35,6 +35,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;
/** /**
* Convert the prolog file wn_s.pl from the <a href="http://www.cogsci.princeton.edu/2.0/WNprolog-2.0.tar.gz">WordNet prolog download</a> * Convert the prolog file wn_s.pl from the <a href="http://www.cogsci.princeton.edu/2.0/WNprolog-2.0.tar.gz">WordNet prolog download</a>
@@ -239,31 +240,36 @@ public class Syns2Index
{ {
int row = 0; int row = 0;
int mod = 1; int mod = 1;
FSDirectory dir = FSDirectory.open(new File(indexDir));
try {
// override the specific index if it already exists // override the specific index if it already exists
IndexWriter writer = new IndexWriter(indexDir, ana, true, IndexWriter.MaxFieldLength.LIMITED); IndexWriter writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(true); // why? writer.setUseCompoundFile(true); // why?
Iterator i1 = word2Nums.keySet().iterator(); Iterator i1 = word2Nums.keySet().iterator();
while (i1.hasNext()) // for each word while (i1.hasNext()) // for each word
{ {
String g = (String) i1.next(); String g = (String) i1.next();
Document doc = new Document(); Document doc = new Document();
int n = index(word2Nums, num2Words, g, doc); int n = index(word2Nums, num2Words, g, doc);
if (n > 0) if (n > 0)
{ {
doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED)); doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
if ((++row % mod) == 0) if ((++row % mod) == 0)
{ {
o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc); o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc);
mod *= 2; mod *= 2;
} }
writer.addDocument(doc); writer.addDocument(doc);
} // else degenerate } // else degenerate
}
o.println( "Optimizing..");
writer.optimize();
writer.close();
} finally {
dir.close();
} }
o.println( "Optimizing..");
writer.optimize();
writer.close();
} }
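
Note: the Syns2Index restructuring makes the Directory outlive the writer and guarantees dir.close() runs even if indexing fails. Reduced to its skeleton — with an extra finally for the writer that the committed code leaves out; the analyzer choice is illustrative:

    import java.io.File;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;

    public class WriterLifecycle {
      static void build(String indexDir) throws Exception {
        FSDirectory dir = FSDirectory.open(new File(indexDir));
        try {
          IndexWriter writer = new IndexWriter(dir,
              new StandardAnalyzer(Version.LUCENE_CURRENT), true,
              IndexWriter.MaxFieldLength.LIMITED);
          try {
            // ... addDocument() calls go here ...
            writer.optimize();
          } finally {
            writer.close();
          }
        } finally {
          dir.close();
        }
      }
    }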
/** /**

View File

@@ -145,6 +145,6 @@ public class FormBasedXmlQueryDemo extends HttpServlet {
writer.close(); writer.close();
//open searcher //open searcher
searcher=new IndexSearcher(rd); searcher=new IndexSearcher(rd, true);
} }
} }

View File

@@ -73,7 +73,7 @@ public class TestParser extends TestCase {
d.close(); d.close();
writer.close(); writer.close();
} }
reader=IndexReader.open(dir); reader=IndexReader.open(dir, true);
searcher=new IndexSearcher(reader); searcher=new IndexSearcher(reader);
} }
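
Note: here the searcher wraps an already-open IndexReader rather than a Directory. Closing such a searcher does not close the reader it was handed, so both need explicit closes, reader last. A minimal sketch:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.Directory;

    public class SearcherOverReader {
      static void searchThenClose(Directory dir) throws Exception {
        IndexReader reader = IndexReader.open(dir, true);
        IndexSearcher searcher = new IndexSearcher(reader);
        try {
          // ... run queries against 'searcher' ...
        } finally {
          searcher.close();  // does not close the externally supplied reader
          reader.close();
        }
      }
    }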

View File

@@ -149,7 +149,7 @@ public class TestQueryTemplateManager extends TestCase {
} }
w.optimize(); w.optimize();
w.close(); w.close();
searcher=new IndexSearcher(dir); searcher=new IndexSearcher(dir, true);
//initialize the parser //initialize the parser
builder=new CorePlusExtensionsParser("artist", analyzer); builder=new CorePlusExtensionsParser("artist", analyzer);