Mirror of https://github.com/apache/lucene.git, synced 2025-02-06 10:08:58 +00:00

LUCENE-1944: Cleanup contrib to not use deprecated APIs

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@821444 13f79535-47bb-0310-9956-ffa450edef68

parent b75e96f2f4
commit 236baf9fcb
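The pattern repeated throughout this commit is the Lucene 2.9 deprecation cleanup: path- and String-based constructors give way to an explicit Directory plus a readOnly flag. A minimal before/after sketch against the 2.9 API (the index path is a placeholder, not from the diff):

    import java.io.File;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.FSDirectory;

    public class OpenIndexExample {
      public static void main(String[] args) throws Exception {
        File indexDir = new File("/path/to/index"); // placeholder path

        // Deprecated in 2.9: open by path, read-only mode implicit
        //   IndexReader reader = IndexReader.open("/path/to/index");
        //   IndexSearcher searcher = new IndexSearcher("/path/to/index");

        // Replacement: open a Directory explicitly and pass the readOnly flag
        FSDirectory dir = FSDirectory.open(indexDir);
        IndexReader reader = IndexReader.open(dir, true);       // true = read-only
        IndexSearcher searcher = new IndexSearcher(dir, true);
        try {
          // ... search / read ...
        } finally {
          searcher.close();
          reader.close();
          dir.close(); // callers now own the Directory and must close it
        }
      }
    }

A read-only reader can skip write-lock and deletion bookkeeping; the hunks below pass false only where the code goes on to mutate the reader, for example deleteDocument() in the InstantiatedIndex tests or the norm rewrites in FieldNormModifier.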
@@ -62,7 +62,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
       writer.addDocument(doc);
     }
     writer.close();
-    reader = IndexReader.open(dir);
+    reader = IndexReader.open(dir, true);
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(appAnalyzer);
   }
 
@@ -79,7 +79,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
 
     writer.close();
 
-    return new IndexSearcher(dir);
+    return new IndexSearcher(dir, true);
   }
 
   protected Hits queryParsingTest(Analyzer analyzer, String qs) throws Exception {
@@ -27,6 +27,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Searcher;
@@ -256,133 +257,138 @@ public class IndexTask extends Task {
       create = true;
     }
 
+    FSDirectory dir = FSDirectory.open(indexDir);
+    try {
     Searcher searcher = null;
     boolean checkLastModified = false;
     if (!create) {
       try {
-        searcher = new IndexSearcher(indexDir.getAbsolutePath());
+        searcher = new IndexSearcher(dir, true);
         checkLastModified = true;
       } catch (IOException ioe) {
         log("IOException: " + ioe.getMessage());
         // Empty - ignore, which indicates to index all
         // documents
       }
     }
 
     log("checkLastModified = " + checkLastModified, Project.MSG_VERBOSE);
 
     IndexWriter writer =
-      new IndexWriter(indexDir, analyzer, create, IndexWriter.MaxFieldLength.LIMITED);
+      new IndexWriter(dir, analyzer, create, IndexWriter.MaxFieldLength.LIMITED);
 
     writer.setUseCompoundFile(useCompoundIndex);
     int totalFiles = 0;
     int totalIndexed = 0;
     int totalIgnored = 0;
     try {
       writer.setMergeFactor(mergeFactor);
 
       for (int i = 0; i < rcs.size(); i++) {
         ResourceCollection rc = (ResourceCollection) rcs.elementAt(i);
         if (rc.isFilesystemOnly()) {
           Iterator resources = rc.iterator();
           while (resources.hasNext()) {
             Resource r = (Resource) resources.next();
             if (!r.isExists() || !(r instanceof FileResource)) {
               continue;
             }
 
             totalFiles++;
 
             File file = ((FileResource) r).getFile();
 
             if (!file.exists() || !file.canRead()) {
               throw new BuildException("File \"" +
                                        file.getAbsolutePath()
                                        + "\" does not exist or is not readable.");
             }
 
             boolean indexIt = true;
 
             if (checkLastModified) {
               Term pathTerm =
                 new Term("path", file.getPath());
               TermQuery query =
                 new TermQuery(pathTerm);
               Hits hits = searcher.search(query);
 
               // if document is found, compare the
               // indexed last modified time with the
               // current file
               // - don't index if up to date
               if (hits.length() > 0) {
                 Document doc = hits.doc(0);
                 String indexModified =
                   doc.get("modified").trim();
                 if (indexModified != null) {
                   long lastModified = 0;
                   try {
                     lastModified = DateTools.stringToTime(indexModified);
                   } catch (ParseException e) {
                     // if modified time is not parsable, skip
                   }
                   if (lastModified == file.lastModified()) {
                     // TODO: remove existing document
                     indexIt = false;
                   }
                 }
               }
             }
 
             if (indexIt) {
               try {
                 log("Indexing " + file.getPath(),
                     Project.MSG_VERBOSE);
                 Document doc =
                   handler.getDocument(file);
 
                 if (doc == null) {
                   totalIgnored++;
                 } else {
                   // Add the path of the file as a field named "path". Use a Keyword field, so
                   // that the index stores the path, and so that the path is searchable
                   doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
 
                   // Add the last modified date of the file a field named "modified". Use a
                   // Keyword field, so that it's searchable, but so that no attempt is made
                   // to tokenize the field into words.
                   doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
 
                   writer.addDocument(doc);
                   totalIndexed++;
                 }
               } catch (DocumentHandlerException e) {
                 throw new BuildException(e);
               }
             }
           }
           // for j
         }
         // if (fs != null)
       }
       // for i
 
       writer.optimize();
     }
     //try
     finally {
       // always make sure everything gets closed,
       // no matter how we exit.
       writer.close();
       if (searcher != null) {
         searcher.close();
       }
     }
 
     Date end = new Date();
 
     log(totalIndexed + " out of " + totalFiles + " indexed (" +
         totalIgnored + " ignored) in " + (end.getTime() - start.getTime()) +
         " milliseconds");
+    } finally {
+      dir.close();
+    }
   }
 
   public static class HandlerConfig implements DynamicConfigurator {
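The IndexTask hunk above (shown once here; the raw page duplicated the unchanged loop body because the whole method was re-indented) also illustrates the ownership rule that comes with FSDirectory.open: the code that opens the Directory closes it, separately from any writer or searcher built on top of it. A minimal sketch of that nesting against the Lucene 2.9 API, with a hypothetical index location:

    import java.io.File;
    import org.apache.lucene.analysis.SimpleAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.FSDirectory;

    public class DirectoryLifecycleSketch {
      public static void main(String[] args) throws Exception {
        File indexDir = new File("/path/to/index"); // hypothetical location
        FSDirectory dir = FSDirectory.open(indexDir);
        try {
          IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(),
              true, IndexWriter.MaxFieldLength.LIMITED);
          try {
            // ... add documents ...
          } finally {
            writer.close(); // close the writer before the directory
          }
        } finally {
          dir.close(); // the opener of the Directory closes it last
        }
      }
    }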
@@ -30,6 +30,7 @@ import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.FSDirectory;
 
 import org.apache.tools.ant.Project;
 import org.apache.tools.ant.types.FileSet;
@@ -43,10 +44,11 @@ public class IndexTaskTest extends TestCase {
     "org.apache.lucene.ant.FileExtensionDocumentHandler";
 
   private String docsDir = System.getProperty("docs.dir");
-  private String indexDir = System.getProperty("index.dir");
+  private File indexDir = new File(System.getProperty("index.dir"));
 
   private Searcher searcher;
   private Analyzer analyzer;
+  private FSDirectory dir;
 
 
   /**
@@ -64,11 +66,12 @@ public class IndexTaskTest extends TestCase {
     task.addFileset(fs);
     task.setOverwrite(true);
     task.setDocumentHandler(docHandler);
-    task.setIndex(new File(indexDir));
+    task.setIndex(indexDir);
     task.setProject(project);
     task.execute();
 
-    searcher = new IndexSearcher(indexDir);
+    dir = FSDirectory.open(indexDir);
+    searcher = new IndexSearcher(dir, true);
     analyzer = new StopAnalyzer();
   }
 
@@ -87,6 +90,7 @@ public class IndexTaskTest extends TestCase {
    */
   public void tearDown() throws IOException {
     searcher.close();
+    dir.close();
   }
 }
@@ -43,7 +43,7 @@ public class PrintReaderTask extends PerfTask {
     Config config = getRunData().getConfig();
     IndexReader r = null;
     if (userData == null)
-      r = IndexReader.open(dir);
+      r = IndexReader.open(dir, true);
     else
       r = OpenReaderTask.openCommitPoint(userData, dir, config, true);
     System.out.println("--> numDocs:"+r.numDocs()+" dels:"+r.numDeletedDocs());
@@ -67,7 +67,7 @@ public abstract class ReadTask extends PerfTask {
     IndexReader ir = getRunData().getIndexReader();
     if (ir == null) {
       Directory dir = getRunData().getDirectory();
-      ir = IndexReader.open(dir);
+      ir = IndexReader.open(dir, true);
       closeReader = true;
       //res++; //this is confusing, comment it out
     }
@@ -57,16 +57,14 @@ public class Algorithm {
     currSequence.setDepth(0);
     String taskPackage = PerfTask.class.getPackage().getName() + ".";
 
-    Class paramClass[] = {PerfRunData.class};
-    PerfRunData paramObj[] = {runData};
-
     while (stok.nextToken() != StreamTokenizer.TT_EOF) {
       switch(stok.ttype) {
 
         case StreamTokenizer.TT_WORD:
           String s = stok.sval;
-          Constructor cnstr = Class.forName(taskPackage+s+"Task").getConstructor(paramClass);
-          PerfTask task = (PerfTask) cnstr.newInstance(paramObj);
+          Constructor<? extends PerfTask> cnstr = Class.forName(taskPackage+s+"Task")
+            .asSubclass(PerfTask.class).getConstructor(PerfRunData.class);
+          PerfTask task = cnstr.newInstance(runData);
           task.setDisableCounting(isDisableCountNextTask);
           isDisableCountNextTask = false;
           currSequence.addTask(task);
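The Algorithm hunk above replaces a raw Constructor and an unchecked cast with Class.asSubclass, which verifies the loaded class up front and lets the compiler type the constructor. A standalone sketch of the same idiom (the Shape/Circle names are made up for illustration, they are not from this commit):

    import java.lang.reflect.Constructor;

    public class AsSubclassSketch {
      // Hypothetical base type standing in for PerfTask.
      public static class Shape {
        private final int size;
        public Shape(int size) { this.size = size; }
        public int size() { return size; }
      }

      public static class Circle extends Shape {
        public Circle(int size) { super(size); }
      }

      public static void main(String[] args) throws Exception {
        String name = AsSubclassSketch.class.getName() + "$Circle";

        // Pre-generics style: raw Constructor plus a cast at the call site.
        Constructor raw = Class.forName(name).getConstructor(int.class);
        Shape s1 = (Shape) raw.newInstance(3);

        // Checked style: asSubclass() fails fast if the class does not
        // extend Shape, and types the Constructor so no cast is needed.
        Constructor<? extends Shape> typed =
            Class.forName(name).asSubclass(Shape.class).getConstructor(int.class);
        Shape s2 = typed.newInstance(4);

        System.out.println(s1.size() + " " + s2.size());
      }
    }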
@@ -24,6 +24,7 @@ import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
 import org.apache.lucene.benchmark.quality.*;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.FSDirectory;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -41,7 +42,8 @@ public class QueryDriver {
 
     File topicsFile = new File(args[0]);
     File qrelsFile = new File(args[1]);
-    Searcher searcher = new IndexSearcher(args[3]);
+    FSDirectory dir = FSDirectory.open(new File(args[3]));
+    Searcher searcher = new IndexSearcher(dir, true);
 
     int maxResults = 1000;
     String docNameField = "docname";
@@ -85,7 +85,7 @@ public class QualityQueriesFinder {
 
   private String [] bestTerms(String field,int numTerms) throws IOException {
     PriorityQueue pq = new TermsDfQueue(numTerms);
-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, true);
     try {
       int threshold = ir.maxDoc() / 10; // ignore words too common.
       TermEnum terms = ir.terms(new Term(field,""));
@@ -94,7 +94,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false, IndexWriter.MaxFieldLength.LIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
     ir.close();
   }
@@ -153,7 +153,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false, IndexWriter.MaxFieldLength.LIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
     ir.close();
   }
@@ -191,7 +191,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
     ir.close();
   }
@@ -263,7 +263,7 @@ public class TestPerfTasksLogic extends TestCase {
     // now we should be able to open the index for write.
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED);
     iw.close();
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals("1 docs were added to the index, this is what we expect to find!",1,ir.numDocs());
     ir.close();
   }
@@ -292,7 +292,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
@@ -354,7 +354,7 @@ public class TestPerfTasksLogic extends TestCase {
     IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED);
     iw.close();
 
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals(numLines + " lines were created but " + ir.numDocs() + " docs are in the index", numLines, ir.numDocs());
     ir.close();
 
@@ -398,7 +398,7 @@ public class TestPerfTasksLogic extends TestCase {
     }
 
     // Separately count how many tokens are actually in the index:
-    IndexReader reader = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader reader = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     assertEquals(NUM_DOCS, reader.numDocs());
 
     TermEnum terms = reader.terms();
@@ -442,7 +442,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 2 * 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
@@ -524,7 +524,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
@@ -560,7 +560,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
@@ -604,7 +604,7 @@ public class TestPerfTasksLogic extends TestCase {
     benchmark.getRunData().getIndexWriter().close();
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
@@ -649,7 +649,7 @@ public class TestPerfTasksLogic extends TestCase {
     benchmark.getRunData().getIndexWriter().close();
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
@@ -692,7 +692,7 @@ public class TestPerfTasksLogic extends TestCase {
     assertFalse(writer.getUseCompoundFile());
     writer.close();
     Directory dir = benchmark.getRunData().getDirectory();
-    IndexReader reader = IndexReader.open(dir);
+    IndexReader reader = IndexReader.open(dir, true);
     TermFreqVector [] tfv = reader.getTermFreqVectors(0);
     assertNotNull(tfv);
     assertTrue(tfv.length > 0);
@@ -731,7 +731,7 @@ public class TestPerfTasksLogic extends TestCase {
     Benchmark benchmark = execBenchmark(algLines);
 
     // 3. test number of docs in the index
-    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
+    IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
     int ndocsExpected = 20; // Reuters20ContentSource exhausts after 20 docs.
     assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
     ir.close();
 
@@ -82,7 +82,7 @@ public class TestQualityRun extends TestCase {
     // validate topics & judgments match each other
     judge.validateData(qqs, logger);
 
-    IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(workDir,"index")));
+    IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(workDir,"index")), true);
 
     QualityQueryParser qqParser = new SimpleQQParser("title","body");
     QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
@@ -82,7 +82,7 @@ public class CollationTestBase extends TestCase {
                       Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
 
     AnalyzingQueryParser aqp = new AnalyzingQueryParser("content", analyzer);
     aqp.setLowercaseExpandedTerms(false);
@@ -127,7 +127,7 @@ public class CollationTestBase extends TestCase {
                       Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
     writer.close();
-    IndexSearcher searcher = new IndexSearcher(ramDir);
+    IndexSearcher searcher = new IndexSearcher(ramDir, true);
     Query query = new TermQuery(new Term("body","body"));
 
     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -162,7 +162,7 @@ public class CollationTestBase extends TestCase {
                       Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
-    IndexSearcher searcher = new IndexSearcher(ramDir);
+    IndexSearcher searcher = new IndexSearcher(ramDir, true);
 
     Query query = new TermRangeQuery("content", firstBeg, firstEnd, true, true);
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -189,7 +189,7 @@ public class CollationTestBase extends TestCase {
     writer.addDocument(doc);
     writer.close();
 
-    IndexReader reader = IndexReader.open(farsiIndex);
+    IndexReader reader = IndexReader.open(farsiIndex, true);
     IndexSearcher search = new IndexSearcher(reader);
 
     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -268,7 +268,7 @@ public class CollationTestBase extends TestCase {
     }
     writer.optimize();
     writer.close();
-    Searcher searcher = new IndexSearcher(indexStore);
+    Searcher searcher = new IndexSearcher(indexStore, true);
 
     Sort sort = new Sort();
     Query queryX = new TermQuery(new Term ("contents", "x"));
@@ -140,7 +140,7 @@ public class JEDirectory extends Directory {
             throw new IOException("File does not exist: " + name);
     }
 
-    public String[] list() throws IOException {
+    public String[] listAll() throws IOException {
         Cursor cursor = null;
         List list = new ArrayList();
 
@@ -190,10 +190,6 @@ public class JEDirectory extends Directory {
         return new JELock();
     }
 
-    public void renameFile(String from, String to) throws IOException {
-        new File(from).rename(this, to);
-    }
-
     public void touchFile(String name) throws IOException {
         File file = new File(name);
         long length = 0L;
@@ -158,7 +158,7 @@ public class DbDirectory extends Directory {
             throw new IOException("File does not exist: " + name);
     }
 
-    public String[] list()
+    public String[] listAll()
         throws IOException
     {
         Dbc cursor = null;
@@ -216,12 +216,6 @@ public class DbDirectory extends Directory {
         return new DbLock();
     }
 
-    public void renameFile(String from, String to)
-        throws IOException
-    {
-        new File(from).rename(this, to);
-    }
-
     public void touchFile(String name)
         throws IOException
     {
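The JEDirectory and DbDirectory hunks above track two Directory API changes in Lucene 2.9: list() was replaced by listAll(), and renameFile() was dropped from the Directory contract. A minimal caller-side sketch of the renamed method, assuming the 2.9 API (the accounting loop mirrors the MemoryIndexTest hunk later in this diff):

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class ListAllSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();

        // Before 2.9 this would have been dir.list(); the replacement is
        // listAll(), which Directory subclasses now override instead.
        String[] names = dir.listAll();
        long total = 0;
        for (String name : names) {
          total += dir.fileLength(name); // sum the sizes of all index files
        }
        System.out.println(names.length + " files, " + total + " bytes");
        dir.close();
      }
    }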
@@ -62,7 +62,7 @@ public class FieldTermStack {
     writer.addDocument( doc );
     writer.close();
 
-    IndexReader reader = IndexReader.open( dir );
+    IndexReader reader = IndexReader.open( dir, true );
     FieldTermStack ftl = new FieldTermStack( reader, 0, "f", fieldQuery );
     reader.close();
   }
@@ -288,7 +288,7 @@ public abstract class AbstractTestCase extends TestCase {
     writer.addDocument( doc );
     writer.close();
 
-    reader = IndexReader.open( dir );
+    reader = IndexReader.open( dir, true );
   }
 
   protected void makeIndexShortMV() throws Exception {
@@ -125,6 +125,6 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase {
     writer.addDocument( doc );
     writer.close();
 
-    reader = IndexReader.open( dir );
+    reader = IndexReader.open( dir, true );
   }
 }
@@ -114,7 +114,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     Analyzer analyzer = new SimpleAnalyzer();
     QueryParser qp = new QueryParser(FIELD_NAME, analyzer);
     query = qp.parse("\"very long\"");
-    searcher = new IndexSearcher(ramDir, false);
+    searcher = new IndexSearcher(ramDir, true);
     TopDocs hits = searcher.search(query, 10);
 
     QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
@@ -564,7 +564,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 
     query = new ConstantScoreRangeQuery(FIELD_NAME, "kannedy", "kznnedy", true, true);
 
-    searcher = new IndexSearcher(ramDir);
+    searcher = new IndexSearcher(ramDir, true);
     // can't rewrite ConstantScoreRangeQuery if you want to highlight it -
     // it rewrites to ConstantScoreQuery which cannot be highlighted
     // query = unReWrittenQuery.rewrite(reader);
@@ -600,7 +600,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 
     query = new WildcardQuery(new Term(FIELD_NAME, "ken*"));
     ((WildcardQuery)query).setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
-    searcher = new IndexSearcher(ramDir);
+    searcher = new IndexSearcher(ramDir, true);
     // can't rewrite ConstantScore if you want to highlight it -
     // it rewrites to ConstantScoreQuery which cannot be highlighted
     // query = unReWrittenQuery.rewrite(reader);
@@ -1098,7 +1098,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
       public void run() throws Exception {
        numHighlights = 0;
        // test to show how rewritten query can still be used
-        searcher = new IndexSearcher(ramDir);
+        searcher = new IndexSearcher(ramDir, true);
        Analyzer analyzer = new StandardAnalyzer();
 
        QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
@@ -1218,7 +1218,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     writer1.addDocument(d);
     writer1.optimize();
     writer1.close();
-    IndexReader reader1 = IndexReader.open(ramDir1);
+    IndexReader reader1 = IndexReader.open(ramDir1, true);
 
     // setup index 2
     RAMDirectory ramDir2 = new RAMDirectory();
@@ -1229,11 +1229,11 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     writer2.addDocument(d);
     writer2.optimize();
     writer2.close();
-    IndexReader reader2 = IndexReader.open(ramDir2);
+    IndexReader reader2 = IndexReader.open(ramDir2, true);
 
     IndexSearcher searchers[] = new IndexSearcher[2];
-    searchers[0] = new IndexSearcher(ramDir1);
-    searchers[1] = new IndexSearcher(ramDir2);
+    searchers[0] = new IndexSearcher(ramDir1, true);
+    searchers[1] = new IndexSearcher(ramDir2, true);
     MultiSearcher multiSearcher = new MultiSearcher(searchers);
     QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
     parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
@@ -1513,7 +1513,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     String q = "t_text1:random";
     QueryParser parser = new QueryParser( "t_text1", a );
     Query query = parser.parse( q );
-    IndexSearcher searcher = new IndexSearcher( dir );
+    IndexSearcher searcher = new IndexSearcher( dir, true );
     // This scorer can return negative idf -> null fragment
     Scorer scorer = new QueryTermScorer( query, searcher.getIndexReader(), "t_text1" );
     // This scorer doesn't use idf (patch version)
@@ -1539,7 +1539,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
    * writer = new IndexWriter(ramDir,bigramAnalyzer , true); Document d = new
    * Document(); Field f = new Field(FIELD_NAME, "java abc def", true, true,
    * true); d.add(f); writer.addDocument(d); writer.close(); IndexReader reader =
-   * IndexReader.open(ramDir);
+   * IndexReader.open(ramDir, true);
    *
    * IndexSearcher searcher=new IndexSearcher(reader); query =
    * QueryParser.parse("abc", FIELD_NAME, bigramAnalyzer);
@@ -1572,7 +1572,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   public void doSearching(Query unReWrittenQuery) throws Exception {
-    searcher = new IndexSearcher(ramDir);
+    searcher = new IndexSearcher(ramDir, true);
     // for any multi-term queries to work (prefix, wildcard, range,fuzzy etc)
     // you must use a rewritten query!
     query = unReWrittenQuery.rewrite(reader);
@@ -1609,7 +1609,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 
     writer.optimize();
     writer.close();
-    reader = IndexReader.open(ramDir);
+    reader = IndexReader.open(ramDir, true);
     numHighlights = 0;
   }
 
@@ -62,7 +62,7 @@ public class TestEmptyIndex extends TestCase {
 
     Directory d = new RAMDirectory();
     new IndexWriter(d, null, true, IndexWriter.MaxFieldLength.UNLIMITED).close();
-    r = IndexReader.open(d);
+    r = IndexReader.open(d, false);
     testNorms(r);
     r.close();
     d.close();
@@ -96,7 +96,7 @@ public class TestEmptyIndex extends TestCase {
 
     Directory d = new RAMDirectory();
     new IndexWriter(d, null, true, IndexWriter.MaxFieldLength.UNLIMITED).close();
-    r = IndexReader.open(d);
+    r = IndexReader.open(d, false);
     termEnumTest(r);
     r.close();
     d.close();
@@ -54,7 +54,7 @@ public class TestIndicesEquals extends TestCase {
 
 //  public void test2() throws Exception {
 //    FSDirectory fsdir = FSDirectory.open(new File("/tmp/fatcorpus"));
-//    IndexReader ir = IndexReader.open(fsdir);
+//    IndexReader ir = IndexReader.open(fsdir, false);
 //    InstantiatedIndex ii = new InstantiatedIndex(ir);
 //    ir.close();
 //    testEquals(fsdir, ii);
@@ -74,7 +74,7 @@ public class TestIndicesEquals extends TestCase {
     indexWriter.close();
 
     // test load ii from index reader
-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, false);
     InstantiatedIndex ii = new InstantiatedIndex(ir);
     ir.close();
 
@@ -116,7 +116,7 @@ public class TestIndicesEquals extends TestCase {
 
   private void testTermDocs(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {
 
-    IndexReader aprioriReader = IndexReader.open(aprioriIndex);
+    IndexReader aprioriReader = IndexReader.open(aprioriIndex, false);
     IndexReader testReader = testIndex.indexReaderFactory();
 
     TermEnum aprioriTermEnum = aprioriReader.terms(new Term("c", "danny"));
@@ -216,7 +216,7 @@ public class TestIndicesEquals extends TestCase {
     testEquals(aprioriIndex, testIndex);
 
     // delete a few documents
-    IndexReader ir = IndexReader.open(aprioriIndex);
+    IndexReader ir = IndexReader.open(aprioriIndex, false);
     ir.deleteDocument(3);
     ir.deleteDocument(8);
     ir.close();
@@ -232,7 +232,7 @@ public class TestIndicesEquals extends TestCase {
 
   protected void testEquals(Directory aprioriIndex, InstantiatedIndex testIndex) throws Exception {
 
-    IndexReader aprioriReader = IndexReader.open(aprioriIndex);
+    IndexReader aprioriReader = IndexReader.open(aprioriIndex, false);
     IndexReader testReader = testIndex.indexReaderFactory();
 
     assertEquals(aprioriReader.numDocs(), testReader.numDocs());
@@ -42,7 +42,7 @@ public class TestSerialization extends TestCase {
     iw.addDocument(doc);
     iw.close();
 
-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, false);
     InstantiatedIndex ii = new InstantiatedIndex(ir);
     ir.close();
 
@@ -50,7 +50,7 @@ public class TestUnoptimizedReaderOnConstructor extends TestCase {
     addDocument(iw, "All work and no play makes wendy a dull girl");
     iw.close();
 
-    IndexReader unoptimizedReader = IndexReader.open(dir);
+    IndexReader unoptimizedReader = IndexReader.open(dir, false);
     unoptimizedReader.deleteDocument(2);
 
     InstantiatedIndex ii;
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.Map.Entry;
+import java.io.File;
 
 import jline.ConsoleReader;
 
@@ -54,6 +55,7 @@ import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.store.FSDirectory;
 
 /**
  * Various methods that interact with Lucene and provide info about the
@@ -62,7 +64,7 @@ import org.apache.lucene.search.Searcher;
 class LuceneMethods {
 
   private int numDocs;
-  private String indexName; //directory of this index
+  private FSDirectory indexName; //directory of this index
   private java.util.Iterator fieldIterator;
   private List fields; //Fields as a vector
   private List indexedFields; //Fields as a vector
@@ -71,8 +73,8 @@ class LuceneMethods {
   private Query query; //current query string
   private String analyzerClassFQN = null; // Analyzer class, if NULL, use default Analyzer
 
-  public LuceneMethods(String index) {
-    indexName = index;
+  public LuceneMethods(String index) throws IOException {
+    indexName = FSDirectory.open(new File(index));
     message("Lucene CLI. Using directory '" + indexName + "'. Type 'help' for instructions.");
   }
 
@@ -94,7 +96,7 @@ class LuceneMethods {
 
 
   public void info() throws java.io.IOException {
-    IndexReader indexReader = IndexReader.open(indexName);
+    IndexReader indexReader = IndexReader.open(indexName, true);
 
 
     getFieldInfo();
@@ -103,7 +105,7 @@ class LuceneMethods {
     message("All Fields:" + fields.toString());
     message("Indexed Fields:" + indexedFields.toString());
 
-    if (IndexReader.isLocked(indexName)) {
+    if (IndexWriter.isLocked(indexName)) {
       message("Index is locked");
     }
     //IndexReader.getCurrentVersion(indexName);
@@ -180,7 +182,7 @@ class LuceneMethods {
 
   private Query explainQuery(String queryString) throws IOException, ParseException {
 
-    searcher = new IndexSearcher(indexName);
+    searcher = new IndexSearcher(indexName, true);
     Analyzer analyzer = createAnalyzer();
     getFieldInfo();
 
@@ -201,7 +203,7 @@ class LuceneMethods {
    */
   private Hits initSearch(String queryString) throws IOException, ParseException {
 
-    searcher = new IndexSearcher(indexName);
+    searcher = new IndexSearcher(indexName, true);
     Analyzer analyzer = createAnalyzer();
     getFieldInfo();
 
@@ -229,7 +231,7 @@ class LuceneMethods {
   }
 
   private void getFieldInfo() throws IOException {
-    IndexReader indexReader = IndexReader.open(indexName);
+    IndexReader indexReader = IndexReader.open(indexName, true);
     fields = new ArrayList();
     indexedFields = new ArrayList();
 
@@ -320,7 +322,7 @@ class LuceneMethods {
    */
   public void terms(String field) throws IOException {
     TreeMap termMap = new TreeMap();
-    IndexReader indexReader = IndexReader.open(indexName);
+    IndexReader indexReader = IndexReader.open(indexName, true);
     TermEnum terms = indexReader.terms();
     while (terms.next()) {
       Term term = terms.term();
@@ -715,7 +715,7 @@ public class MemoryIndex implements Serializable {
     private Searcher searcher; // needed to find searcher.getSimilarity()
 
     private MemoryIndexReader() {
-      super(null); // avoid as much superclass baggage as possible
+      super(); // avoid as much superclass baggage as possible
     }
 
     // lucene >= 1.9 or lucene-1.4.3 with patch removing "final" in superclass
@@ -420,7 +420,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     Searcher searcher = null;
     try {
       if (index instanceof Directory)
-        searcher = new IndexSearcher((Directory)index);
+        searcher = new IndexSearcher((Directory)index, true);
       else
         searcher = ((MemoryIndex) index).createSearcher();
 
@@ -450,7 +450,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     try {
       Directory dir = (Directory) index;
       int size = 0;
-      String[] fileNames = dir.list();
+      String[] fileNames = dir.listAll();
       for (int i=0; i < fileNames.length; i++) {
         size += dir.fileLength(fileNames[i]);
       }
@@ -112,7 +112,7 @@ public class FieldNormModifier {
     TermEnum termEnum = null;
     TermDocs termDocs = null;
     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, true);
       termCounts = new int[reader.maxDoc()];
       // if we are killing norms, get fake ones
       if (sim == null)
@@ -142,7 +142,7 @@ public class FieldNormModifier {
     }
 
     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, false);
       for (int d = 0; d < termCounts.length; d++) {
         if (! reader.isDeleted(d)) {
           if (sim == null)
@@ -19,8 +19,11 @@ package org.apache.lucene.misc;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.PriorityQueue;
 
+import java.io.File;
+
 /**
  * <code>HighFreqTerms</code> class extracts terms and their frequencies out
  * of an existing Lucene index.
@@ -34,11 +37,14 @@ public class HighFreqTerms {
 
   public static void main(String[] args) throws Exception {
     IndexReader reader = null;
+    FSDirectory dir = null;
     String field = null;
     if (args.length == 1) {
-      reader = IndexReader.open(args[0]);
+      dir = FSDirectory.open(new File(args[0]));
+      reader = IndexReader.open(dir, true);
     } else if (args.length == 2) {
-      reader = IndexReader.open(args[0]);
+      dir = FSDirectory.open(new File(args[0]));
+      reader = IndexReader.open(dir, true);
       field = args[1];
     } else {
       usage();
@@ -35,7 +35,7 @@ public class IndexMergeTool {
       System.err.println("Usage: IndexMergeTool <mergedIndex> <index1> <index2> [index3] ...");
       System.exit(1);
     }
-    File mergedIndex = new File(args[0]);
+    FSDirectory mergedIndex = FSDirectory.open(new File(args[0]));
 
     IndexWriter writer = new IndexWriter(mergedIndex, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
 
@@ -113,7 +113,7 @@ public class LengthNormModifier {
     TermEnum termEnum = null;
     TermDocs termDocs = null;
     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, false);
       termCounts = new int[reader.maxDoc()];
       try {
        termEnum = reader.terms(new Term(field));
@@ -139,7 +139,7 @@ public class LengthNormModifier {
     }
 
     try {
-      reader = IndexReader.open(dir);
+      reader = IndexReader.open(dir, false);
       for (int d = 0; d < termCounts.length; d++) {
         if (! reader.isDeleted(d)) {
           byte norm = sim.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
@@ -87,7 +87,7 @@ public class TestFieldNormModifier extends TestCase {
 
   public void testFieldWithNoNorm() throws Exception {
 
-    IndexReader r = IndexReader.open(store);
+    IndexReader r = IndexReader.open(store, false);
     byte[] norms = r.norms("nonorm");
 
     // sanity check, norms should all be 1
@@ -110,7 +110,7 @@ public class TestFieldNormModifier extends TestCase {
     }
 
     // nothing should have changed
-    r = IndexReader.open(store);
+    r = IndexReader.open(store, false);
 
     norms = r.norms("nonorm");
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
@@ -128,7 +128,7 @@ public class TestFieldNormModifier extends TestCase {
 
   public void testGoodCases() throws Exception {
 
-    IndexSearcher searcher = new IndexSearcher(store);
+    IndexSearcher searcher = new IndexSearcher(store, true);
     final float[] scores = new float[NUM_DOCS];
     float lastScore = 0.0f;
 
@@ -164,7 +164,7 @@ public class TestFieldNormModifier extends TestCase {
     fnm.reSetNorms("field");
 
     // new norm (with default similarity) should put longer docs first
-    searcher = new IndexSearcher(store);
+    searcher = new IndexSearcher(store, true);
     searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
       private int docBase = 0;
       private Scorer scorer;
@@ -194,21 +194,21 @@ public class TestFieldNormModifier extends TestCase {
 
   public void testNormKiller() throws IOException {
 
-    IndexReader r = IndexReader.open(store);
+    IndexReader r = IndexReader.open(store, false);
     byte[] oldNorms = r.norms("untokfield");
     r.close();
 
     FieldNormModifier fnm = new FieldNormModifier(store, s);
     fnm.reSetNorms("untokfield");
 
-    r = IndexReader.open(store);
+    r = IndexReader.open(store, false);
     byte[] newNorms = r.norms("untokfield");
     r.close();
     assertFalse(Arrays.equals(oldNorms, newNorms));
 
 
     // verify that we still get documents in the same order as originally
-    IndexSearcher searcher = new IndexSearcher(store);
+    IndexSearcher searcher = new IndexSearcher(store, true);
     final float[] scores = new float[NUM_DOCS];
     float lastScore = 0.0f;
 
@@ -65,7 +65,7 @@ public class TestTermVectorAccessor extends TestCase {
 
     iw.close();
 
-    IndexReader ir = IndexReader.open(dir);
+    IndexReader ir = IndexReader.open(dir, false);
 
     TermVectorAccessor accessor = new TermVectorAccessor();
 
@@ -64,7 +64,7 @@ public class ChainedFilterTest extends TestCase {
 
     writer.close();
 
-    searcher = new IndexSearcher(directory);
+    searcher = new IndexSearcher(directory, true);
 
     // query for everything to make life easier
     BooleanQuery bq = new BooleanQuery();
@@ -222,7 +222,7 @@ public class ChainedFilterTest extends TestCase {
     IndexWriter writer = new IndexWriter(dir, analyzer, true, MaxFieldLength.LIMITED);
     writer.close();
 
-    Searcher searcher = new IndexSearcher(dir);
+    Searcher searcher = new IndexSearcher(dir, true);
 
     Query query = new TermQuery(new Term("none", "none"));
 
@@ -93,7 +93,7 @@ public class TestLengthNormModifier extends TestCase {
 
   public void testFieldWithNoNorm() throws Exception {
 
-    IndexReader r = IndexReader.open(store);
+    IndexReader r = IndexReader.open(store, false);
     byte[] norms = r.norms("nonorm");
 
     // sanity check, norms should all be 1
@@ -116,7 +116,7 @@ public class TestLengthNormModifier extends TestCase {
     }
 
     // nothing should have changed
-    r = IndexReader.open(store);
+    r = IndexReader.open(store, false);
 
     norms = r.norms("nonorm");
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
@@ -140,7 +140,7 @@ public class TestLengthNormModifier extends TestCase {
     float lastScore = 0.0f;
 
     // default similarity should put docs with shorter length first
-    searcher = new IndexSearcher(store);
+    searcher = new IndexSearcher(store, false);
     searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
       private int docBase = 0;
       private Scorer scorer;
@@ -177,7 +177,7 @@ public class TestLengthNormModifier extends TestCase {
     fnm.reSetNorms("field");
 
     // new norm (with default similarity) should put longer docs first
-    searcher = new IndexSearcher(store);
+    searcher = new IndexSearcher(store, false);
     searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
       private int docBase = 0;
       private Scorer scorer;
@@ -122,7 +122,7 @@ public class TestComplexPhraseQuery extends TestCase {
       w.addDocument(doc);
     }
     w.close();
-    searcher = new IndexSearcher(rd);
+    searcher = new IndexSearcher(rd, true);
   }
 
   protected void tearDown() throws Exception {
@@ -162,28 +162,6 @@ public class DuplicateFilter extends Filter
     return bits;
   }
 
-  /**
-   * @param args
-   * @throws IOException
-   * @throws Exception
-   */
-  public static void main(String[] args) throws Exception
-  {
-    IndexReader r=IndexReader.open("/indexes/personCentricAnon");
-//    IndexReader r=IndexReader.open("/indexes/enron");
-    long start=System.currentTimeMillis();
-//    DuplicateFilter df = new DuplicateFilter("threadId",KM_USE_FIRST_OCCURRENCE, PM_FAST_INVALIDATION);
-//    DuplicateFilter df = new DuplicateFilter("threadId",KM_USE_LAST_OCCURRENCE, PM_FAST_INVALIDATION);
-    DuplicateFilter df = new DuplicateFilter("vehicle.vrm",KM_USE_LAST_OCCURRENCE, PM_FAST_INVALIDATION);
-//    DuplicateFilter df = new DuplicateFilter("title",USE_LAST_OCCURRENCE);
-//    df.setProcessingMode(PM_SLOW_VALIDATION);
-    BitSet b = df.bits(r);
-    long end=System.currentTimeMillis()-start;
-    System.out.println(b.cardinality()+" in "+end+" ms ");
-
-  }
-
 
   public String getFieldName()
   {
     return fieldName;
@@ -32,6 +32,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.store.FSDirectory;
 
 import java.util.Set;
 import java.util.HashMap;
@@ -720,7 +721,8 @@ public final class MoreLikeThis {
     }
 
     PrintStream o = System.out;
-    IndexReader r = IndexReader.open(indexName);
+    FSDirectory dir = FSDirectory.open(new File(indexName));
+    IndexReader r = IndexReader.open(dir, true);
     o.println("Open index " + indexName + " which has " + r.numDocs() + " docs");
 
     MoreLikeThis mlt = new MoreLikeThis(r);
@@ -741,7 +743,7 @@ public final class MoreLikeThis {
 
     o.println("q: " + query);
     o.println();
-    IndexSearcher searcher = new IndexSearcher(indexName);
+    IndexSearcher searcher = new IndexSearcher(dir, true);
 
     Hits hits = searcher.search(query);
     int len = hits.length();
@@ -54,7 +54,7 @@ public class BooleanFilterTest extends TestCase
     addDoc(writer, "admin guest", "030", "20050101","N");
 
     writer.close();
-    reader=IndexReader.open(directory);
+    reader=IndexReader.open(directory, true);
   }
 
   private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
@@ -55,7 +55,7 @@ public class DuplicateFilterTest extends TestCase
     addDoc(writer, "http://lucene.apache.org", "Oops. Lucene 2.1 out", "20050102");
 
     writer.close();
-    reader=IndexReader.open(directory);
+    reader=IndexReader.open(directory, true);
     searcher =new IndexSearcher(reader);
 
   }
@@ -51,7 +51,7 @@ public class FuzzyLikeThisQueryTest extends TestCase
     addDoc(writer, "johnathon smythe","6");
 
     writer.close();
-    searcher=new IndexSearcher(directory);
+    searcher=new IndexSearcher(directory, true);
   }
 
   private void addDoc(IndexWriter writer, String name, String id) throws IOException
@@ -64,7 +64,7 @@ public class TermsFilterTest extends TestCase
       w.addDocument(doc);
     }
     w.close();
-    IndexReader reader = IndexReader.open(rd);
+    IndexReader reader = IndexReader.open(rd, true);
 
     TermsFilter tf=new TermsFilter();
     tf.addTerm(new Term(fieldName,"19"));
@@ -335,7 +335,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
     mfqp.setAnalyzer(analyzer);
     mfqp.setDefaultOperator(Operator.AND);
     Query q = mfqp.parse("the footest", null);
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
     ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
     assertEquals(1, hits.length);
     is.close();
@@ -333,7 +333,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
         new String[] { "body" }, analyzer);
     mfqp.setDefaultOperator(QueryParserWrapper.Operator.AND);
     Query q = mfqp.parse("the footest");
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
     ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
     assertEquals(1, hits.length);
     is.close();
@@ -598,7 +598,7 @@ public class TestQPHelper extends LocalizedTestCase {
         Field.Index.UN_TOKENIZED));
     iw.addDocument(doc);
     iw.close();
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
 
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(new WhitespaceAnalyzer());
@@ -1020,7 +1020,7 @@ public class TestQPHelper extends LocalizedTestCase {
     addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
     addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
     iw.close();
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
     assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
     assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
     assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
@@ -594,7 +594,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
         Field.Index.UN_TOKENIZED));
     iw.addDocument(doc);
     iw.close();
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
 
     QueryParserWrapper qp = new QueryParserWrapper("content",
         new WhitespaceAnalyzer());
@@ -1000,7 +1000,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
     addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
     addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
     iw.close();
-    IndexSearcher is = new IndexSearcher(ramDir);
+    IndexSearcher is = new IndexSearcher(ramDir, true);
     assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
     assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
     assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
@@ -45,7 +45,7 @@ public class TestRegexQuery extends TestCase {
       writer.addDocument(doc);
       writer.optimize();
       writer.close();
-      searcher = new IndexSearcher(directory);
+      searcher = new IndexSearcher(directory, true);
     } catch (Exception e) {
       fail(e.toString());
     }
@@ -61,7 +61,7 @@ public class TestSpanRegexQuery extends TestCase {
     writer.optimize();
     writer.close();
 
-    IndexSearcher searcher = new IndexSearcher(directory);
+    IndexSearcher searcher = new IndexSearcher(directory, true);
     SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "aut.*"));
     SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
     // SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6,
@@ -80,8 +80,8 @@ public class TestSpanRegexQuery extends TestCase {
 
     // 1. Search the same store which works
     IndexSearcher[] arrSearcher = new IndexSearcher[2];
-    arrSearcher[0] = new IndexSearcher(indexStoreA);
-    arrSearcher[1] = new IndexSearcher(indexStoreB);
+    arrSearcher[0] = new IndexSearcher(indexStoreA, true);
+    arrSearcher[1] = new IndexSearcher(indexStoreB, true);
     MultiSearcher searcher = new MultiSearcher(arrSearcher);
     Hits hits = searcher.search(query);
     arrSearcher[0].close();
@@ -21,8 +21,10 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.store.FSDirectory;
 
 import java.io.IOException;
+import java.io.File;
 import java.rmi.Naming;
 import java.rmi.RMISecurityManager;
 import java.rmi.RemoteException;
@@ -116,7 +118,7 @@ public class RemoteSearchable
       System.setSecurityManager(new RMISecurityManager());
     }
 
-    Searchable local = new IndexSearcher(indexName);
+    Searchable local = new IndexSearcher(FSDirectory.open(new File(indexName)), true);
     RemoteSearchable impl = new RemoteSearchable(local);
 
     // bind the implementation to "Searchable"
@@ -78,7 +78,7 @@ public class TestRemoteCachingWrapperFilter extends LuceneTestCase {
     // publish it
     port = _TestUtil.getRandomSocketPort();
     LocateRegistry.createRegistry(port);
-    Searchable local = new IndexSearcher(indexStore);
+    Searchable local = new IndexSearcher(indexStore, true);
     RemoteSearchable impl = new RemoteSearchable(local);
     Naming.rebind("//localhost:" + port + "/Searchable", impl);
   }
@@ -72,7 +72,7 @@ public class TestRemoteSearchable extends LuceneTestCase {
     // publish it
     port = _TestUtil.getRandomSocketPort();
     LocateRegistry.createRegistry(port);
-    Searchable local = new IndexSearcher(indexStore);
+    Searchable local = new IndexSearcher(indexStore, true);
     RemoteSearchable impl = new RemoteSearchable(local);
     Naming.rebind("//localhost:" + port + "/Searchable", impl);
   }

@@ -237,6 +237,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {
}

// test custom search when remote
+/* rewrite with new API
public void testRemoteCustomSort() throws Exception {
Searchable searcher = getRemote();
MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });

@@ -256,7 +257,7 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable {

assertSaneFieldCaches(getName() + " Comparator");
FieldCache.DEFAULT.purgeAllCaches();
-}
+}*/

// test that the relevancy scores are the same even if
// hits are sorted

@@ -164,7 +164,7 @@ public class TestCartesian extends TestCase{
}

public void testAntiM() throws IOException, InvalidGeoException {
-searcher = new IndexSearcher(directory);
+searcher = new IndexSearcher(directory, true);

final double miles = 2800.0;
// Hawaii

@@ -252,7 +252,7 @@ public class TestCartesian extends TestCase{
}

public void testPoleFlipping() throws IOException, InvalidGeoException {
-searcher = new IndexSearcher(directory);
+searcher = new IndexSearcher(directory, true);

final double miles = 3500.0;
lat = 41.6032207;

@@ -340,7 +340,7 @@ public class TestCartesian extends TestCase{
}

public void testRange() throws IOException, InvalidGeoException {
-searcher = new IndexSearcher(directory);
+searcher = new IndexSearcher(directory, true);

final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0};
final int[] expected = new int[] {7, 1, 0, 0};

@@ -431,7 +431,7 @@ public class TestCartesian extends TestCase{


public void testGeoHashRange() throws IOException, InvalidGeoException {
-searcher = new IndexSearcher(directory);
+searcher = new IndexSearcher(directory, true);

final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0};
final int[] expected = new int[] {7, 1, 0, 0};

@@ -113,7 +113,7 @@ public class SpellChecker {
if (searcher != null) {
searcher.close();
}
-searcher = new IndexSearcher(this.spellIndex);
+searcher = new IndexSearcher(this.spellIndex, true);
}

public void setStringDistance(StringDistance sd) {

@@ -302,7 +302,7 @@ public class SpellChecker {

//close the old searcher
searcher.close();
-searcher = new IndexSearcher(this.spellIndex);
+searcher = new IndexSearcher(this.spellIndex, true);
}

/**

@@ -350,7 +350,7 @@ public class SpellChecker {
// also re-open the spell index to see our own changes when the next suggestion
// is fetched:
searcher.close();
-searcher = new IndexSearcher(this.spellIndex);
+searcher = new IndexSearcher(this.spellIndex, true);
}

/**
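
Note: all three SpellChecker hunks share one idiom: after the spell index is rebuilt or appended to, the stale searcher is closed and a fresh read-only one opened, since an open searcher only sees the point-in-time snapshot it was opened on. A sketch of that idiom (the fields mirror those visible in the diff):

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.Directory;

    // Illustrative class, not from the commit.
    public class SwapSearcherSketch {
      private Directory spellIndex;
      private IndexSearcher searcher;

      public void refreshSearcher() throws IOException {
        // Close the old snapshot, then reopen to pick up newly written segments.
        if (searcher != null) {
          searcher.close();
        }
        searcher = new IndexSearcher(spellIndex, true);
      }
    }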

@@ -77,7 +77,7 @@ public class TestLuceneDictionary extends TestCase {

public void testFieldNonExistent() throws IOException {
try {
-indexReader = IndexReader.open(store);
+indexReader = IndexReader.open(store, true);

ld = new LuceneDictionary(indexReader, "nonexistent_field");
it = ld.getWordsIterator();

@@ -91,7 +91,7 @@ public class TestLuceneDictionary extends TestCase {

public void testFieldAaa() throws IOException {
try {
-indexReader = IndexReader.open(store);
+indexReader = IndexReader.open(store, true);

ld = new LuceneDictionary(indexReader, "aaa");
it = ld.getWordsIterator();

@@ -107,7 +107,7 @@ public class TestLuceneDictionary extends TestCase {

public void testFieldContents_1() throws IOException {
try {
-indexReader = IndexReader.open(store);
+indexReader = IndexReader.open(store, true);

ld = new LuceneDictionary(indexReader, "contents");
it = ld.getWordsIterator();

@@ -137,7 +137,7 @@ public class TestLuceneDictionary extends TestCase {

public void testFieldContents_2() throws IOException {
try {
-indexReader = IndexReader.open(store);
+indexReader = IndexReader.open(store, true);

ld = new LuceneDictionary(indexReader, "contents");
it = ld.getWordsIterator();

@@ -169,7 +169,7 @@ public class TestLuceneDictionary extends TestCase {

public void testFieldZzz() throws IOException {
try {
-indexReader = IndexReader.open(store);
+indexReader = IndexReader.open(store, true);

ld = new LuceneDictionary(indexReader, "zzz");
it = ld.getWordsIterator();

@@ -186,7 +186,7 @@ public class TestLuceneDictionary extends TestCase {

public void testSpellchecker() throws IOException {
SpellChecker sc = new SpellChecker(new RAMDirectory());
-indexReader = IndexReader.open(store);
+indexReader = IndexReader.open(store, true);
sc.indexDictionary(new LuceneDictionary(indexReader, "contents"));
String[] suggestions = sc.suggestSimilar("Tam", 1);
assertEquals(1, suggestions.length);
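
Note: the TestLuceneDictionary hunks apply the same readOnly treatment to IndexReader.open, the reader-level twin of the IndexSearcher change. Sketch (helper invented):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.store.Directory;

    // Illustrative helper, not from the commit.
    public class ReaderOpenSketch {
      public static IndexReader openReadOnly(Directory store) throws IOException {
        // Deprecated form: IndexReader.open(store) returns a writable reader.
        // The two-argument overload states the intent; with readOnly=true the
        // reader refuses deleteDocument()/setNorm() and skips their locking.
        return IndexReader.open(store, true);
      }
    }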

@@ -63,7 +63,7 @@ public class TestSpellChecker extends TestCase {


public void testBuild() throws CorruptIndexException, IOException {
-IndexReader r = IndexReader.open(userindex);
+IndexReader r = IndexReader.open(userindex, true);

spellChecker.clearIndex();

@@ -192,7 +192,7 @@ public class TestSpellChecker extends TestCase {
}

private int numdoc() throws IOException {
-IndexReader rs = IndexReader.open(spellindex);
+IndexReader rs = IndexReader.open(spellindex, true);
int num = rs.numDocs();
assertTrue(num != 0);
//System.out.println("num docs: " + num);

@@ -98,7 +98,7 @@ public class BooleanQueryTst {
/* if (verbose) System.out.println("Lucene: " + query.toString()); */

TestCollector tc = new TestCollector();
-Searcher searcher = new IndexSearcher(dBase.getDb());
+Searcher searcher = new IndexSearcher(dBase.getDb(), true);
try {
searcher.search(query, tc);
} finally {

@@ -153,7 +153,7 @@ public class ListSearcher extends AbstractListModel {
this.searchString = searchString;

//make a new index searcher with the in memory (RAM) index.
-IndexSearcher is = new IndexSearcher(directory);
+IndexSearcher is = new IndexSearcher(directory, true);

//make an array of fields - one for each column
String[] fields = {FIELD_NAME};

@@ -231,7 +231,7 @@ public class TableSearcher extends AbstractTableModel {
this.searchString = searchString;

//make a new index searcher with the in memory (RAM) index.
-IndexSearcher is = new IndexSearcher(directory);
+IndexSearcher is = new IndexSearcher(directory, true);

//make an array of fields - one for each column
String[] fields = new String[tableModel.getColumnCount()];

@@ -70,7 +70,7 @@ public final class SynExpand {
}

FSDirectory directory = FSDirectory.open(new File(args[0]));
-IndexSearcher searcher = new IndexSearcher(directory);
+IndexSearcher searcher = new IndexSearcher(directory, true);

String query = args[1];
String field = "contents";

@@ -53,7 +53,7 @@ public class SynLookup {
}

FSDirectory directory = FSDirectory.open(new File(args[0]));
-IndexSearcher searcher = new IndexSearcher(directory);
+IndexSearcher searcher = new IndexSearcher(directory, true);

String word = args[1];
Hits hits = searcher.search(

@@ -35,6 +35,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.FSDirectory;

/**
 * Convert the prolog file wn_s.pl from the <a href="http://www.cogsci.princeton.edu/2.0/WNprolog-2.0.tar.gz">WordNet prolog download</a>

@@ -239,31 +240,36 @@ public class Syns2Index
{
int row = 0;
int mod = 1;
+FSDirectory dir = FSDirectory.open(new File(indexDir));
+try {

-// override the specific index if it already exists
-IndexWriter writer = new IndexWriter(indexDir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
-writer.setUseCompoundFile(true); // why?
-Iterator i1 = word2Nums.keySet().iterator();
-while (i1.hasNext()) // for each word
-{
-  String g = (String) i1.next();
-  Document doc = new Document();
+  // override the specific index if it already exists
+  IndexWriter writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
+  writer.setUseCompoundFile(true); // why?
+  Iterator i1 = word2Nums.keySet().iterator();
+  while (i1.hasNext()) // for each word
+  {
+    String g = (String) i1.next();
+    Document doc = new Document();

-  int n = index(word2Nums, num2Words, g, doc);
-  if (n > 0)
-  {
-    doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
-    if ((++row % mod) == 0)
-    {
-      o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc);
-      mod *= 2;
-    }
-    writer.addDocument(doc);
-  } // else degenerate
+    int n = index(word2Nums, num2Words, g, doc);
+    if (n > 0)
+    {
+      doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
+      if ((++row % mod) == 0)
+      {
+        o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc);
+        mod *= 2;
+      }
+      writer.addDocument(doc);
+    } // else degenerate
}
-o.println( "Optimizing..");
-writer.optimize();
-writer.close();
+  o.println( "Optimizing..");
+  writer.optimize();
+  writer.close();
+} finally {
+  dir.close();
+}
}

/**
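
Note: the Syns2Index hunk is the one place this cleanup changes control flow: once the code opens its own FSDirectory instead of handing IndexWriter a path string, it also owns closing it, hence the new try/finally. A condensed sketch of that ownership pattern (the analyzer and document loop are assumptions standing in for the tool's own):

    import java.io.File;
    import java.io.IOException;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.FSDirectory;

    // Illustrative only; WhitespaceAnalyzer stands in for the real analyzer.
    public class DirectoryLifecycleSketch {
      public static void rebuild(String indexDir) throws IOException {
        FSDirectory dir = FSDirectory.open(new File(indexDir));
        try {
          // create=true overwrites any index already at this location.
          IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
              true, IndexWriter.MaxFieldLength.LIMITED);
          // ... addDocument() calls would go here ...
          writer.optimize();
          writer.close();
        } finally {
          // The deprecated path-based constructors closed the directory
          // implicitly; with an explicitly opened Directory, the caller must.
          dir.close();
        }
      }
    }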

@@ -145,6 +145,6 @@ public class FormBasedXmlQueryDemo extends HttpServlet {
writer.close();

//open searcher
-searcher=new IndexSearcher(rd);
+searcher=new IndexSearcher(rd, true);
}
}

@@ -73,7 +73,7 @@ public class TestParser extends TestCase {
d.close();
writer.close();
}
-reader=IndexReader.open(dir);
+reader=IndexReader.open(dir, true);
searcher=new IndexSearcher(reader);

}

@@ -149,7 +149,7 @@ public class TestQueryTemplateManager extends TestCase {
}
w.optimize();
w.close();
-searcher=new IndexSearcher(dir);
+searcher=new IndexSearcher(dir, true);

//initialize the parser
builder=new CorePlusExtensionsParser("artist", analyzer);