mirror of https://github.com/apache/lucene.git
LUCENE-1578: Support for loading unoptimized readers to the constructor of InstantiatedIndex. (Karl Wettin)
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@784481 13f79535-47bb-0310-9956-ffa450edef68
parent 1511ec5e31
commit 196428ec39
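For context, a minimal sketch (not part of the commit) of what this change enables: building an InstantiatedIndex directly from a multi-segment, unoptimized reader. It follows the new TestUnoptimizedReaderOnConstructor test added below; the example class name and the addDoc helper are illustrative only, assuming the Lucene 2.9-era contrib API shown in the diff.

    import java.io.IOException;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.store.instantiated.InstantiatedIndex;

    public class UnoptimizedReaderExample {

      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();

        // First writer session creates the index with one segment.
        IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
        addDoc(iw, "Hello, world!");
        iw.close();

        // A second session appends another segment, leaving the index unoptimized.
        iw = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
        addDoc(iw, "Hello, tellus!");
        iw.close();

        // Before this change the InstantiatedIndex constructor rejected such readers.
        IndexReader unoptimizedReader = IndexReader.open(dir);
        InstantiatedIndex ii = new InstantiatedIndex(unoptimizedReader);
        unoptimizedReader.close();
      }

      private static void addDoc(IndexWriter iw, String text) throws IOException {
        Document doc = new Document();
        doc.add(new Field("field", text, Field.Store.NO, Field.Index.ANALYZED));
        iw.addDocument(doc);
      }
    }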
@@ -64,6 +64,9 @@ New features
 
  6. LUCENE-1676: Added DelimitedPayloadTokenFilter class for automatically adding payloads "in-stream" (Grant Ingersoll)
 
+ 7. LUCENE-1578: Support for loading unoptimized readers to the
+    constructor of InstantiatedIndex. (Karl Wettin)
+
 Optimizations
 
  1. LUCENE-1643: Re-use the collation key (RawCollationKey) for
@@ -110,7 +110,8 @@ public class InstantiatedIndex
   public InstantiatedIndex(IndexReader sourceIndexReader, Set<String> fields) throws IOException {
 
     if (!sourceIndexReader.isOptimized()) {
-      throw new IOException("Source index is not optimized.");
+      System.out.println(("Source index is not optimized."));
+      //throw new IOException("Source index is not optimized.");
     }
 
 
@@ -170,11 +171,14 @@ public class InstantiatedIndex
     }
 
 
-    documentsByNumber = new InstantiatedDocument[sourceIndexReader.numDocs()];
+    documentsByNumber = new InstantiatedDocument[sourceIndexReader.maxDoc()];
 
 
     // create documents
-    for (int i = 0; i < sourceIndexReader.numDocs(); i++) {
-      if (!sourceIndexReader.isDeleted(i)) {
+    for (int i = 0; i < sourceIndexReader.maxDoc(); i++) {
+      if (sourceIndexReader.isDeleted(i)) {
+        deletedDocuments.add(i);
+      } else {
         InstantiatedDocument document = new InstantiatedDocument();
         // copy stored fields from source reader
         Document sourceDocument = sourceIndexReader.document(i);
@@ -259,6 +263,9 @@ public class InstantiatedIndex
 
     // load offsets to term-document informations
     for (InstantiatedDocument document : getDocumentsByNumber()) {
+      if (document == null) {
+        continue; // deleted
+      }
       for (Field field : (List<Field>) document.getDocument().getFields()) {
         if (field.isTermVectorStored() && field.isStoreOffsetWithTermVector()) {
           TermPositionVector termPositionVector = (TermPositionVector) sourceIndexReader.getTermFreqVector(document.getDocumentNumber(), field.name());
@@ -40,6 +40,10 @@ import org.apache.lucene.index.TermPositionVector;
 import org.apache.lucene.index.TermPositions;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
 
 /**
  * Asserts equality of content and behaviour of two index readers.
@@ -24,6 +24,12 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.Term;
 
+/**
+ * Assert that the content of an index
+ * is instantly available
+ * for all open searchers
+ * also after a commit.
+ */
 public class TestRealTime extends TestCase {
 
   public void test() throws Exception {
@@ -0,0 +1,53 @@
+package org.apache.lucene.store.instantiated;
+
+import junit.framework.TestCase;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+/**
+ * @author kalle
+ * @since 2009-mar-30 13:15:49
+ */
+public class TestUnoptimizedReaderOnConstructor extends TestCase {
+
+  public void test() throws Exception {
+    Directory dir = new RAMDirectory();
+    IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    addDocument(iw, "Hello, world!");
+    addDocument(iw, "All work and no play makes jack a dull boy");
+    iw.commit("a");
+    iw.close();
+
+    iw = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    addDocument(iw, "Hello, tellus!");
+    addDocument(iw, "All work and no play makes danny a dull boy");
+    iw.commit("b");
+    iw.close();
+
+    iw = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    addDocument(iw, "Hello, earth!");
+    addDocument(iw, "All work and no play makes wendy a dull girl");
+    iw.commit("c");
+    iw.close();
+
+    IndexReader unoptimizedReader = IndexReader.open(dir);
+    unoptimizedReader.deleteDocument(2);
+
+    InstantiatedIndex ii = new InstantiatedIndex(unoptimizedReader);
+
+  }
+
+  private void addDocument(IndexWriter iw, String text) throws IOException {
+    Document doc = new Document();
+    doc.add(new Field("field", text, Field.Store.NO, Field.Index.ANALYZED));
+    iw.addDocument(doc);
+  }
+}