mirror of https://github.com/apache/lucene.git

commit bde79d433c

LUCENE-3312: The API of oal.document was restructured to differentiate between stored documents and indexed documents.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1379982 13f79535-47bb-0310-9956-ffa450edef68

@@ -6,6 +6,16 @@ http://s.apache.org/luceneversions
 
 ======================= Lucene 5.0.0 =======================
 
+Changes in backwards compatibility policy
+
+* LUCENE-3312: The API of oal.document was restructured to
+  differentiate between stored documents and indexed documents.
+  IndexReader.document(int) now returns StorableDocument
+  instead of Document. In most cases a simple replacement
+  of the return type is enough to upgrade (see MIGRATE.txt).
+  (Nikola Tanković, Uwe Schindler, Chris Male, Mike McCandless,
+  Robert Muir)
+
 ======================= Lucene 4.0.0 =======================
 
 New Features

@@ -1,3 +1,8 @@
 # Apache Lucene Migration Guide
 
-TODO: Lucene 5.0 currently has no migration guide.
+## Separation of IndexDocument and StoredDocument (LUCENE-3312)
+
+The API of oal.document was restructured to differentiate between stored
+documents and indexed documents. IndexReader.document(int) now returns
+StorableDocument instead of Document. In most cases a simple replacement
+of the return type is enough to upgrade.

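For illustration, a minimal before/after sketch of that upgrade (a hypothetical snippet, not part of this commit; it assumes an index with a stored "title" field):

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;

    // Returns the stored "title" of the first matching document.
    static String firstTitle(Directory dir) throws Exception {
      DirectoryReader reader = DirectoryReader.open(dir);
      try {
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
        // Before LUCENE-3312 this line declared: Document doc = ...
        StoredDocument doc = searcher.doc(hits.scoreDocs[0].doc);
        return doc.get("title");
      } finally {
        reader.close();
      }
    }
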
@@ -25,6 +25,7 @@ import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TopDocs;

@@ -79,7 +80,7 @@ public class UIMABaseAnalyzerTest extends BaseTokenStreamTestCase {
     IndexSearcher indexSearcher = new IndexSearcher(directoryReader);
     TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
     assertTrue(result.totalHits > 0);
-    Document d = indexSearcher.doc(result.scoreDocs[0].doc);
+    StoredDocument d = indexSearcher.doc(result.scoreDocs[0].doc);
     assertNotNull(d);
     assertNotNull(d.getField("title"));
     assertEquals(dummyTitle, d.getField("title").stringValue());

@@ -99,7 +100,7 @@ public class UIMABaseAnalyzerTest extends BaseTokenStreamTestCase {
     directoryReader = DirectoryReader.open(dir);
     indexSearcher = new IndexSearcher(directoryReader);
     result = indexSearcher.search(new MatchAllDocsQuery(), 2);
-    Document d1 = indexSearcher.doc(result.scoreDocs[1].doc);
+    StoredDocument d1 = indexSearcher.doc(result.scoreDocs[1].doc);
     assertNotNull(d1);
     assertNotNull(d1.getField("title"));
     assertEquals(dogmasTitle, d1.getField("title").stringValue());

@@ -226,11 +226,12 @@ public class DocMaker implements Closeable {
 
     final DocState ds = getDocState();
     final Document doc = reuseFields ? ds.doc : new Document();
-    doc.getFields().clear();
+    doc.clear();
 
     // Set ID_FIELD
     FieldType ft = new FieldType(valType);
     ft.setIndexed(true);
+    ft.setStored(true);
 
     Field idField = ds.getField(ID_FIELD, ft);
     int id;

@@ -20,11 +20,12 @@ package org.apache.lucene.benchmark.byTask.tasks;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 
 /**
  * Abstract class for benchmarking highlighting performance
  */
 public abstract class BenchmarkHighlighter {
   public abstract int doHighlight( IndexReader reader, int doc, String field,
-      Document document, Analyzer analyzer, String text ) throws Exception ;
+      StoredDocument document, Analyzer analyzer, String text ) throws Exception ;
 }

@@ -32,6 +32,8 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.MultiTermQuery;

@@ -96,7 +98,7 @@ public abstract class ReadTask extends PerfTask {
 
       // optionally warm and add num docs traversed to count
       if (withWarm()) {
-        Document doc = null;
+        StoredDocument doc = null;
         Bits liveDocs = MultiFields.getLiveDocs(reader);
         for (int m = 0; m < reader.maxDoc(); m++) {
           if (null == liveDocs || liveDocs.get(m)) {

@@ -142,7 +144,7 @@ public abstract class ReadTask extends PerfTask {
           System.out.println("numDocs() = " + reader.numDocs());
           for(int i=0;i<hits.scoreDocs.length;i++) {
             final int docID = hits.scoreDocs[i].doc;
-            final Document doc = reader.document(docID);
+            final StoredDocument doc = reader.document(docID);
             System.out.println("  " + i + ": doc=" + docID + " score=" + hits.scoreDocs[i].score + " " + printHitsField + " =" + doc.get(printHitsField));
           }
         }

@@ -163,7 +165,7 @@ public abstract class ReadTask extends PerfTask {
             int id = scoreDocs[m].doc;
             res++;
             if (retrieve) {
-              Document document = retrieveDoc(reader, id);
+              StoredDocument document = retrieveDoc(reader, id);
               res += document != null ? 1 : 0;
               if (numHighlight > 0 && m < numHighlight) {
                 Collection<String> fieldsToHighlight = getFieldsToHighlight(document);

@@ -193,7 +195,7 @@ public abstract class ReadTask extends PerfTask {
   }
 
 
-  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
+  protected StoredDocument retrieveDoc(IndexReader ir, int id) throws IOException {
     return ir.document(id);
   }
 

@@ -296,10 +298,10 @@ public abstract class ReadTask extends PerfTask {
    * @param document The Document
    * @return A Collection of Field names (Strings)
    */
-  protected Collection<String> getFieldsToHighlight(Document document) {
-    List<IndexableField> fields = document.getFields();
+  protected Collection<String> getFieldsToHighlight(StoredDocument document) {
+    List<StorableField> fields = document.getFields();
     Set<String> result = new HashSet<String>(fields.size());
-    for (final IndexableField f : fields) {
+    for (final StorableField f : fields) {
       result.add(f.name());
     }
     return result;

@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.FloatField;

@@ -68,7 +69,7 @@ public class ReadTokensTask extends PerfTask {
 
   @Override
   public int doLogic() throws Exception {
-    List<IndexableField> fields = doc.getFields();
+    List<Field> fields = doc.getFields();
     Analyzer analyzer = getRunData().getAnalyzer();
     int tokenCount = 0;
     for(final IndexableField field : fields) {

@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.highlight.Highlighter;
 import org.apache.lucene.search.highlight.QueryScorer;

@@ -101,7 +102,7 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
     return new BenchmarkHighlighter(){
       @Override
       public int doHighlight(IndexReader reader, int doc, String field,
-          Document document, Analyzer analyzer, String text) throws Exception {
+          StoredDocument document, Analyzer analyzer, String text) throws Exception {
         TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
         TextFragment[] frag = highlighter.getBestTextFragments(ts, text, mergeContiguous, maxFrags);
         return frag != null ? frag.length : 0;

@@ -110,7 +111,7 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
   }
 
   @Override
-  protected Collection<String> getFieldsToHighlight(Document document) {
+  protected Collection<String> getFieldsToHighlight(StoredDocument document) {
     Collection<String> result = super.getFieldsToHighlight(document);
     //if stored is false, then result will be empty, in which case just get all the param fields
     if (paramFields.isEmpty() == false && result.isEmpty() == false) {

@@ -25,6 +25,7 @@ import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 
 /**
  * Search and Traverse and Retrieve docs task using a

@@ -54,7 +55,7 @@ public class SearchTravRetLoadFieldSelectorTask extends SearchTravTask {
 
 
   @Override
-  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
+  protected StoredDocument retrieveDoc(IndexReader ir, int id) throws IOException {
     if (fieldsToLoad == null) {
       return ir.document(id);
     } else {

@@ -21,6 +21,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
 import org.apache.lucene.search.vectorhighlight.FieldQuery;

@@ -99,7 +100,7 @@ public class SearchTravRetVectorHighlightTask extends SearchTravTask {
     return new BenchmarkHighlighter(){
       @Override
       public int doHighlight(IndexReader reader, int doc, String field,
-          Document document, Analyzer analyzer, String text) throws Exception {
+          StoredDocument document, Analyzer analyzer, String text) throws Exception {
         final FieldQuery fq = highlighter.getFieldQuery( myq, reader);
         String[] fragments = highlighter.getBestFragments(fq, reader, doc, field, fragSize, maxFrags);
         return fragments != null ? fragments.length : 0;

@@ -108,7 +109,7 @@ public class SearchTravRetVectorHighlightTask extends SearchTravTask {
   }
 
   @Override
-  protected Collection<String> getFieldsToHighlight(Document document) {
+  protected Collection<String> getFieldsToHighlight(StoredDocument document) {
     Collection<String> result = super.getFieldsToHighlight(document);
     //if stored is false, then result will be empty, in which case just get all the param fields
     if (paramFields.isEmpty() == false && result.isEmpty() == false) {

@@ -33,6 +33,7 @@ import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.benchmark.byTask.utils.StreamUtils;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 
 /**
  * A task which writes documents, one line per document. Each line is in the

@@ -172,7 +173,7 @@ public class WriteLineDocTask extends PerfTask {
 
     boolean sufficient = !checkSufficientFields;
     for (int i=0; i<fieldsToWrite.length; i++) {
-      IndexableField f = doc.getField(fieldsToWrite[i]);
+      StorableField f = doc.getField(fieldsToWrite[i]);
      String text = f == null ? "" : matcher.reset(f.stringValue()).replaceAll(" ").trim();
       sb.append(text).append(SEP);
       sufficient |= text.length()>0 && sufficientFields[i];

@@ -28,6 +28,7 @@ import org.apache.lucene.search.highlight.TokenSources;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 
 import java.io.IOException;
 

@@ -44,8 +45,8 @@ public class CountingHighlighterTestTask extends SearchTravRetHighlightTask {
   }
 
   @Override
-  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
-    Document document = ir.document(id);
+  protected StoredDocument retrieveDoc(IndexReader ir, int id) throws IOException {
+    StoredDocument document = ir.document(id);
     if (document != null) {
       numDocsRetrieved++;
     }

@@ -57,7 +58,7 @@ public class CountingHighlighterTestTask extends SearchTravRetHighlightTask {
     highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(q));
     return new BenchmarkHighlighter() {
       @Override
-      public int doHighlight(IndexReader reader, int doc, String field, Document document, Analyzer analyzer, String text) throws Exception {
+      public int doHighlight(IndexReader reader, int doc, String field, StoredDocument document, Analyzer analyzer, String text) throws Exception {
         TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
         TextFragment[] frag = highlighter.getBestTextFragments(ts, text, mergeContiguous, maxFrags);
         numHighlightedResults += frag != null ? frag.length : 0;

@@ -168,7 +168,7 @@ and proximity searches (though sentence identification is not provided by Lucene
 <ul>
   <li>
     At indexing, as a consequence of
-    {@link org.apache.lucene.index.IndexWriter#addDocument(Iterable) addDocument(doc)},
+    {@link org.apache.lucene.index.IndexWriter#addDocument(IndexDocument) addDocument(doc)},
     the Analyzer in effect for indexing is invoked for each indexed field of the added document.
   </li>
   <li>

@@ -241,7 +241,7 @@ and proximity searches (though sentence identification is not provided by Lucene
 </p>
 <h3>Field Section Boundaries</h3>
 <p>
-When {@link org.apache.lucene.document.Document#add(org.apache.lucene.index.IndexableField) document.add(field)}
+When {@link org.apache.lucene.document.Document#add(org.apache.lucene.document.Field) document.add(field)}
 is called multiple times for the same field name, we could say that each such call creates a new
 section for that field in that document.
 In fact, a separate call to

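As a sketch of the two call sites this javadoc refers to (the "body" field, its values, and the writer are hypothetical; TextField stands in for any analyzed field type):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field.Store;
    import org.apache.lucene.document.TextField;

    Document doc = new Document();
    // Two document.add(field) calls with the same name create two sections
    // of the "body" field; the analyzer in effect is invoked for each
    // indexed field instance when writer.addDocument(doc) is called.
    doc.add(new TextField("body", "first section", Store.YES));
    doc.add(new TextField("body", "second section", Store.YES));
    writer.addDocument(doc);
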
@@ -28,21 +28,23 @@ import org.apache.lucene.document.LongDocValuesField;
 import org.apache.lucene.document.PackedLongDocValuesField;
 import org.apache.lucene.document.ShortDocValuesField;
 import org.apache.lucene.document.SortedBytesDocValuesField;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StraightBytesDocValuesField;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
 /**
- * Abstract API that consumes {@link IndexableField}s.
+ * Abstract API that consumes {@link StorableField}s.
  * {@link DocValuesConsumer} are always associated with a specific field and
  * segments. Concrete implementations of this API write the given
- * {@link IndexableField} into a implementation specific format depending on
+ * {@link StorableField} into a implementation specific format depending on
  * the fields meta-data.
  *
  * @lucene.experimental

@@ -53,7 +55,7 @@ public abstract class DocValuesConsumer {
 
   protected abstract Type getType();
   /**
-   * Adds the given {@link IndexableField} instance to this
+   * Adds the given {@link StorableField} instance to this
    * {@link DocValuesConsumer}
    *
    * @param docID

@@ -64,7 +66,7 @@ public abstract class DocValuesConsumer {
    * @throws IOException
    *           if an {@link IOException} occurs
    */
-  public abstract void add(int docID, IndexableField value)
+  public abstract void add(int docID, StorableField value)
       throws IOException;
 
   /**

@@ -73,7 +75,7 @@ public abstract class DocValuesConsumer {
    * @param docCount
    *          the total number of documents in this {@link DocValuesConsumer}.
    *          Must be greater than or equal the last given docID to
-   *          {@link #add(int, IndexableField)}.
+   *          {@link #add(int, StorableField)}.
    * @throws IOException
    */
   public abstract void finish(int docCount) throws IOException;

@@ -136,7 +138,7 @@ public abstract class DocValuesConsumer {
     assert source != null;
     int docID = docBase;
     final Type type = getType();
-    final Field scratchField;
+    final StoredField scratchField;
     switch(type) {
     case VAR_INTS:
       scratchField = new PackedLongDocValuesField("", (long) 0);

@@ -202,7 +204,7 @@ public abstract class DocValuesConsumer {
    * ID must always be greater than the previous ID or <tt>0</tt> if called the
    * first time.
    */
-  protected void mergeDoc(Field scratchField, Source source, int docID, int sourceDoc)
+  protected void mergeDoc(StoredField scratchField, Source source, int docID, int sourceDoc)
       throws IOException {
     switch(getType()) {
     case BYTES_FIXED_DEREF:

@@ -19,13 +19,13 @@ package org.apache.lucene.codecs;
 import java.io.Closeable;
 import java.io.IOException;
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.index.AtomicReader;
 
 /**
  * Codec API for writing stored fields:

@@ -33,7 +33,7 @@ import org.apache.lucene.util.Bits;
  * <ol>
  *   <li>For every document, {@link #startDocument(int)} is called,
  *       informing the Codec how many fields will be written.
- *   <li>{@link #writeField(FieldInfo, IndexableField)} is called for
+ *   <li>{@link #writeField(FieldInfo, StorableField)} is called for
  *       each field in the document.
  *   <li>After all documents have been written, {@link #finish(FieldInfos, int)}
  *       is called for verification/sanity-checks.

@@ -45,14 +45,14 @@ import org.apache.lucene.util.Bits;
 public abstract class StoredFieldsWriter implements Closeable {
 
   /** Called before writing the stored fields of the document.
-   *  {@link #writeField(FieldInfo, IndexableField)} will be called
+   *  {@link #writeField(FieldInfo, StorableField)} will be called
    *  <code>numStoredFields</code> times. Note that this is
    *  called even if the document has no stored fields, in
    *  this case <code>numStoredFields</code> will be zero. */
   public abstract void startDocument(int numStoredFields) throws IOException;
 
   /** Writes a single stored field. */
-  public abstract void writeField(FieldInfo info, IndexableField field) throws IOException;
+  public abstract void writeField(FieldInfo info, StorableField field) throws IOException;
 
   /** Aborts writing entirely, implementation should remove
    *  any partially-written files, etc. */

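A minimal sketch of the write sequence this javadoc describes (the driver method and its arguments are hypothetical; in Lucene the indexing chain and segment merger play this role):

    // Assumes an already-open StoredFieldsWriter `out` for one segment.
    void writeAll(StoredFieldsWriter out, FieldInfos infos,
                  List<List<StorableField>> docs) throws IOException {
      for (List<StorableField> doc : docs) {
        out.startDocument(doc.size());     // how many fields will be written
        for (StorableField field : doc) {
          out.writeField(infos.fieldInfo(field.name()), field);
        }
      }
      out.finish(infos, docs.size());      // verification/sanity-checks
      out.close();
    }
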
@@ -69,7 +69,7 @@ public abstract class StoredFieldsWriter implements Closeable {
   /** Merges in the stored fields from the readers in
    *  <code>mergeState</code>. The default implementation skips
    *  over deleted documents, and uses {@link #startDocument(int)},
-   *  {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(FieldInfos, int)},
+   *  {@link #writeField(FieldInfo, StorableField)}, and {@link #finish(FieldInfos, int)},
    *  returning the number of documents that were written.
    *  Implementations can override this method for more sophisticated
    *  merging (bulk-byte copying, etc). */

@@ -89,7 +89,7 @@ public abstract class StoredFieldsWriter implements Closeable {
         // on the fly?
         // NOTE: it's very important to first assign to doc then pass it to
         // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.document(i);
+        StoredDocument doc = reader.document(i);
         addDocument(doc, mergeState.fieldInfos);
         docCount++;
         mergeState.checkAbort.work(300);

@@ -100,20 +100,16 @@ public abstract class StoredFieldsWriter implements Closeable {
   }
 
   /** sugar method for startDocument() + writeField() for every stored field in the document */
-  protected final void addDocument(Iterable<? extends IndexableField> doc, FieldInfos fieldInfos) throws IOException {
+  protected final void addDocument(Iterable<? extends StorableField> doc, FieldInfos fieldInfos) throws IOException {
     int storedCount = 0;
-    for (IndexableField field : doc) {
-      if (field.fieldType().stored()) {
-        storedCount++;
-      }
+    for (StorableField field : doc) {
+      storedCount++;
     }
 
     startDocument(storedCount);
 
-    for (IndexableField field : doc) {
-      if (field.fieldType().stored()) {
-        writeField(fieldInfos.fieldInfo(field.name()), field);
-      }
+    for (StorableField field : doc) {
+      writeField(fieldInfos.fieldInfo(field.name()), field);
     }
   }
 }

@@ -29,6 +29,8 @@ import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

@@ -131,7 +133,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
                      IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION));
   }
 
-  public void writeField(FieldInfo info, IndexableField field) throws IOException {
+  public void writeField(FieldInfo info, StorableField field) throws IOException {
     fieldsStream.writeVInt(info.number);
     int bits = 0;
     final BytesRef bytes;

@@ -297,7 +299,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
           // on the fly?
           // NOTE: it's very important to first assign to doc then pass it to
           // fieldsWriter.addDocument; see LUCENE-1282
-          Document doc = reader.document(j);
+          StoredDocument doc = reader.document(j);
           addDocument(doc, mergeState.fieldInfos);
           docCount++;
           mergeState.checkAbort.work(300);

@@ -324,7 +326,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
       for (; docCount < maxDoc; docCount++) {
         // NOTE: it's very important to first assign to doc then pass it to
         // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.document(docCount);
+        StoredDocument doc = reader.document(docCount);
         addDocument(doc, mergeState.fieldInfos);
         mergeState.checkAbort.work(300);
       }

@@ -30,6 +30,7 @@ import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;

|
@ -420,7 +421,7 @@ public final class Bytes {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void add(int docID, IndexableField value) throws IOException {
|
public void add(int docID, StorableField value) throws IOException {
|
||||||
BytesRef bytes = value.binaryValue();
|
BytesRef bytes = value.binaryValue();
|
||||||
assert bytes != null;
|
assert bytes != null;
|
||||||
if (bytes.length == 0) { // default value - skip it
|
if (bytes.length == 0) { // default value - skip it
|
||||||
|
|
|
@@ -22,12 +22,12 @@ import java.io.IOException;
 import org.apache.lucene.codecs.lucene40.values.Bytes.BytesReaderBase;
 import org.apache.lucene.codecs.lucene40.values.Bytes.BytesSourceBase;
 import org.apache.lucene.codecs.lucene40.values.Bytes.BytesWriterBase;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StraightBytesDocValuesField;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

@@ -74,7 +74,7 @@ class FixedStraightBytesImpl {
     }
 
     @Override
-    public void add(int docID, IndexableField value) throws IOException {
+    public void add(int docID, StorableField value) throws IOException {
       final BytesRef bytes = value.binaryValue();
       assert bytes != null;
       assert lastDocID < docID;

@@ -201,7 +201,7 @@ class FixedStraightBytesImpl {
     }
 
     @Override
-    protected void mergeDoc(Field scratchField, Source source, int docID, int sourceDoc) throws IOException {
+    protected void mergeDoc(StoredField scratchField, Source source, int docID, int sourceDoc) throws IOException {
       assert lastDocID < docID;
       setMergeBytes(source, sourceDoc);
       if (size == -1) {

@@ -24,6 +24,7 @@ import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

|
@ -88,7 +89,7 @@ public class Floats {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void add(int docID, IndexableField value) throws IOException {
|
public void add(int docID, StorableField value) throws IOException {
|
||||||
template.toBytes(value.numericValue().doubleValue(), bytesRef);
|
template.toBytes(value.numericValue().doubleValue(), bytesRef);
|
||||||
bytesSpareField.setBytesValue(bytesRef);
|
bytesSpareField.setBytesValue(bytesRef);
|
||||||
super.add(docID, bytesSpareField);
|
super.add(docID, bytesSpareField);
|
||||||
|
|
|
@@ -25,6 +25,7 @@ import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

|
@ -112,7 +113,7 @@ public final class Ints {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void add(int docID, IndexableField value) throws IOException {
|
public void add(int docID, StorableField value) throws IOException {
|
||||||
template.toBytes(value.numericValue().longValue(), bytesRef);
|
template.toBytes(value.numericValue().longValue(), bytesRef);
|
||||||
bytesSpareField.setBytesValue(bytesRef);
|
bytesSpareField.setBytesValue(bytesRef);
|
||||||
super.add(docID, bytesSpareField);
|
super.add(docID, bytesSpareField);
|
||||||
|
|
|
@@ -26,6 +26,7 @@ import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

|
@ -126,7 +127,7 @@ class PackedIntValues {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void add(int docID, IndexableField docValue) throws IOException {
|
public void add(int docID, StorableField docValue) throws IOException {
|
||||||
final long v = docValue.numericValue().longValue();
|
final long v = docValue.numericValue().longValue();
|
||||||
assert lastDocId < docID;
|
assert lastDocId < docID;
|
||||||
if (!started) {
|
if (!started) {
|
||||||
|
|
|
@@ -22,11 +22,11 @@ import java.io.IOException;
 import org.apache.lucene.codecs.lucene40.values.Bytes.BytesReaderBase;
 import org.apache.lucene.codecs.lucene40.values.Bytes.BytesSourceBase;
 import org.apache.lucene.codecs.lucene40.values.Bytes.BytesWriterBase;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;

@@ -86,7 +86,7 @@ class VarStraightBytesImpl {
     }
 
     @Override
-    public void add(int docID, IndexableField value) throws IOException {
+    public void add(int docID, StorableField value) throws IOException {
       final BytesRef bytes = value.binaryValue();
       assert bytes != null;
       assert !merge;

@@ -156,7 +156,7 @@ class VarStraightBytesImpl {
     }
 
     @Override
-    protected void mergeDoc(Field scratchField, Source source, int docID, int sourceDoc) throws IOException {
+    protected void mergeDoc(StoredField scratchField, Source source, int docID, int sourceDoc) throws IOException {
       assert merge;
       assert lastDocID < docID;
       source.getBytes(sourceDoc, bytesRef);

@@ -21,7 +21,7 @@ import org.apache.lucene.codecs.DocValuesArraySource;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;

|
@ -72,7 +72,7 @@ public class SimpleTextDocValuesConsumer extends DocValuesConsumer {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void add(int docID, IndexableField value) throws IOException {
|
public void add(int docID, StorableField value) throws IOException {
|
||||||
assert docID >= 0;
|
assert docID >= 0;
|
||||||
final int ord, vSize;
|
final int ord, vSize;
|
||||||
switch (type) {
|
switch (type) {
|
||||||
|
|
|
@@ -23,7 +23,7 @@ import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;

@@ -89,7 +89,7 @@ public class SimpleTextStoredFieldsWriter extends StoredFieldsWriter {
   }
 
   @Override
-  public void writeField(FieldInfo info, IndexableField field) throws IOException {
+  public void writeField(FieldInfo info, StorableField field) throws IOException {
     write(FIELD);
     write(Integer.toString(info.number));
     newLine();

@@ -36,7 +36,7 @@ import org.apache.lucene.index.DocValues;
  * @see DocValues for further information
  * */
 
-public class ByteDocValuesField extends Field {
+public class ByteDocValuesField extends StoredField {
 
   /**
    * Type for 8-bit byte DocValues.

@@ -41,7 +41,7 @@ import org.apache.lucene.util.BytesRef;
  * @see DocValues for further information
  * */
 
-public class DerefBytesDocValuesField extends Field {
+public class DerefBytesDocValuesField extends StoredField {
 
   // TODO: ideally indexer figures out var vs fixed on its own!?
   /**

@@ -19,11 +19,15 @@ package org.apache.lucene.document;
 
 import java.util.*;
 
+import org.apache.lucene.index.IndexDocument;
 import org.apache.lucene.index.IndexReader;  // for javadoc
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.search.IndexSearcher; // for javadoc
 import org.apache.lucene.search.ScoreDoc; // for javadoc
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FilterIterator;
 
 /** Documents are the unit of indexing and search.
  *

@@ -38,18 +42,36 @@ import org.apache.lucene.util.BytesRef;
  * ScoreDoc#doc} or {@link IndexReader#document(int)}.
  */
 
-public final class Document implements Iterable<IndexableField> {
+public final class Document implements IndexDocument {
 
-  private final List<IndexableField> fields = new ArrayList<IndexableField>();
+  private final List<Field> fields = new ArrayList<Field>();
 
   /** Constructs a new document with no fields. */
   public Document() {}
 
-  @Override
-  public Iterator<IndexableField> iterator() {
-    return fields.iterator();
-  }
+
+  /**
+   * Creates a Document from StoredDocument so it that can be used e.g. for another
+   * round of indexing.
+   *
+   */
+  public Document(StoredDocument storedDoc) {
+    for (StorableField field : storedDoc.getFields()) {
+      Field newField = new Field(field.name(), (FieldType) field.fieldType());
+
+      newField.fieldsData = field.stringValue();
+      if (newField.fieldsData == null)
+        newField.fieldsData = field.numericValue();
+      if (newField.fieldsData == null)
+        newField.fieldsData = field.binaryValue();
+      if (newField.fieldsData == null)
+        newField.fieldsData = field.readerValue();
+
+      add(newField);
+    }
+  }
 
   /**
    * <p>Adds a field to a document. Several fields may be added with
   * the same name. In this case, if the fields are indexed, their text is

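A usage sketch for this new copy constructor (reader, writer, docID and the "id"/"timestamp" fields are hypothetical; it works because Document now implements IndexDocument while IndexReader.document(int) returns a StoredDocument):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field.Store;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.index.Term;

    StoredDocument stored = reader.document(docID);  // read-only stored view
    Document editable = new Document(stored);        // back to an indexable doc
    editable.removeFields("timestamp");
    editable.add(new StringField("timestamp", "20120802", Store.YES));
    writer.updateDocument(new Term("id", editable.get("id")), editable);
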
@@ -60,7 +82,7 @@ public final class Document implements IndexDocument {
    * a document has to be deleted from an index and a new changed version of that
    * document has to be added.</p>
    */
-  public final void add(IndexableField field) {
+  public final void add(Field field) {
     fields.add(field);
   }
 

@@ -75,9 +97,9 @@ public final class Document implements IndexDocument {
    * document has to be added.</p>
    */
   public final void removeField(String name) {
-    Iterator<IndexableField> it = fields.iterator();
+    Iterator<Field> it = fields.iterator();
     while (it.hasNext()) {
-      IndexableField field = it.next();
+      Field field = it.next();
       if (field.name().equals(name)) {
         it.remove();
         return;

@@ -95,9 +117,9 @@ public final class Document implements IndexDocument {
    * document has to be added.</p>
    */
   public final void removeFields(String name) {
-    Iterator<IndexableField> it = fields.iterator();
+    Iterator<Field> it = fields.iterator();
     while (it.hasNext()) {
-      IndexableField field = it.next();
+      Field field = it.next();
       if (field.name().equals(name)) {
         it.remove();
       }

@@ -116,7 +138,10 @@ public final class Document implements IndexDocument {
    */
   public final BytesRef[] getBinaryValues(String name) {
     final List<BytesRef> result = new ArrayList<BytesRef>();
-    for (IndexableField field : fields) {
+    Iterator<Field> it = storedFieldsIterator();
+
+    while (it.hasNext()) {
+      StorableField field = it.next();
       if (field.name().equals(name)) {
         final BytesRef bytes = field.binaryValue();
         if (bytes != null) {

@@ -138,7 +163,10 @@ public final class Document implements IndexDocument {
    * @return a <code>byte[]</code> containing the binary field value or <code>null</code>
    */
   public final BytesRef getBinaryValue(String name) {
-    for (IndexableField field : fields) {
+    Iterator<Field> it = storedFieldsIterator();
+
+    while (it.hasNext()) {
+      StorableField field = it.next();
       if (field.name().equals(name)) {
         final BytesRef bytes = field.binaryValue();
         if (bytes != null) {

@@ -153,8 +181,8 @@ public final class Document implements IndexDocument {
    * null. If multiple fields exists with this name, this method returns the
    * first value added.
    */
-  public final IndexableField getField(String name) {
-    for (IndexableField field : fields) {
+  public final Field getField(String name) {
+    for (Field field : fields) {
       if (field.name().equals(name)) {
         return field;
       }

@ -170,15 +198,15 @@ public final class Document implements Iterable<IndexableField> {
|
||||||
* @param name the name of the field
|
* @param name the name of the field
|
||||||
* @return a <code>Fieldable[]</code> array
|
* @return a <code>Fieldable[]</code> array
|
||||||
*/
|
*/
|
||||||
public IndexableField[] getFields(String name) {
|
public Field[] getFields(String name) {
|
||||||
List<IndexableField> result = new ArrayList<IndexableField>();
|
List<Field> result = new ArrayList<Field>();
|
||||||
for (IndexableField field : fields) {
|
for (Field field : fields) {
|
||||||
if (field.name().equals(name)) {
|
if (field.name().equals(name)) {
|
||||||
result.add(field);
|
result.add(field);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return result.toArray(new IndexableField[result.size()]);
|
return result.toArray(new Field[result.size()]);
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Returns a List of all the fields in a document.
|
/** Returns a List of all the fields in a document.
|
||||||
|
@ -186,9 +214,11 @@ public final class Document implements Iterable<IndexableField> {
|
||||||
* <i>not</i> available in documents retrieved from the
|
* <i>not</i> available in documents retrieved from the
|
||||||
* index, e.g. {@link IndexSearcher#doc(int)} or {@link
|
* index, e.g. {@link IndexSearcher#doc(int)} or {@link
|
||||||
* IndexReader#document(int)}.
|
* IndexReader#document(int)}.
|
||||||
|
*
|
||||||
|
* @return an immutable <code>List[Field]</code>
|
||||||
*/
|
*/
|
||||||
public final List<IndexableField> getFields() {
|
public final List<Field> getFields() {
|
||||||
return fields;
|
return Collections.unmodifiableList(fields);
|
||||||
}
|
}
|
||||||
|
|
||||||
private final static String[] NO_STRINGS = new String[0];
|
private final static String[] NO_STRINGS = new String[0];
|
||||||
|
@ -205,7 +235,10 @@ public final class Document implements Iterable<IndexableField> {
|
||||||
*/
|
*/
|
||||||
public final String[] getValues(String name) {
|
public final String[] getValues(String name) {
|
||||||
List<String> result = new ArrayList<String>();
|
List<String> result = new ArrayList<String>();
|
||||||
for (IndexableField field : fields) {
|
Iterator<Field> it = storedFieldsIterator();
|
||||||
|
|
||||||
|
while (it.hasNext()) {
|
||||||
|
StorableField field = it.next();
|
||||||
if (field.name().equals(name) && field.stringValue() != null) {
|
if (field.name().equals(name) && field.stringValue() != null) {
|
||||||
result.add(field.stringValue());
|
result.add(field.stringValue());
|
||||||
}
|
}
|
||||||
|
@ -227,7 +260,10 @@ public final class Document implements Iterable<IndexableField> {
|
||||||
* the actual numeric field instance back, use {@link #getField}.
|
* the actual numeric field instance back, use {@link #getField}.
|
||||||
*/
|
*/
|
||||||
public final String get(String name) {
|
public final String get(String name) {
|
||||||
for (IndexableField field : fields) {
|
Iterator<Field> it = storedFieldsIterator();
|
||||||
|
|
||||||
|
while (it.hasNext()) {
|
||||||
|
StorableField field = it.next();
|
||||||
if (field.name().equals(name) && field.stringValue() != null) {
|
if (field.name().equals(name) && field.stringValue() != null) {
|
||||||
return field.stringValue();
|
return field.stringValue();
|
||||||
}
|
}
|
||||||
|
@ -249,4 +285,50 @@ public final class Document implements Iterable<IndexableField> {
|
||||||
buffer.append(">");
|
buffer.append(">");
|
||||||
return buffer.toString();
|
return buffer.toString();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Obtains all indexed fields in document */
|
||||||
|
@Override
|
||||||
|
public Iterable<? extends IndexableField> indexableFields() {
|
||||||
|
return new Iterable<Field>() {
|
||||||
|
@Override
|
||||||
|
public Iterator<Field> iterator() {
|
||||||
|
return Document.this.indexedFieldsIterator();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** Obtains all stored fields in document. */
|
||||||
|
@Override
|
||||||
|
public Iterable<? extends StorableField> storableFields() {
|
||||||
|
return new Iterable<Field>() {
|
||||||
|
@Override
|
||||||
|
public Iterator<Field> iterator() {
|
||||||
|
return Document.this.storedFieldsIterator();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
private Iterator<Field> storedFieldsIterator() {
|
||||||
|
return new FilterIterator<Field>(fields.iterator()) {
|
||||||
|
@Override
|
||||||
|
protected boolean predicateFunction(Field field) {
|
||||||
|
return field.type.stored();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
private Iterator<Field> indexedFieldsIterator() {
|
||||||
|
return new FilterIterator<Field>(fields.iterator()) {
|
||||||
|
@Override
|
||||||
|
protected boolean predicateFunction(Field field) {
|
||||||
|
return field.type.indexed();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Removes all the fields from document. */
|
||||||
|
public void clear() {
|
||||||
|
fields.clear();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
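A sketch (not part of this patch) of what the reworked Document looks like from caller code; it keeps one list of Field instances and exposes two filtered views of it. StringField and its Field.Store flag are assumed from trunk:

    Document doc = new Document();
    doc.add(new StringField("id", "42", Field.Store.YES));
    // View consumed by the indexing chain (inverted fields only):
    for (IndexableField f : doc.indexableFields()) {
      System.out.println("index: " + f.name());
    }
    // View consumed by the stored-fields writer:
    for (StorableField f : doc.storableFields()) {
      System.out.println("store: " + f.name() + " = " + f.stringValue());
    }

Because Field implements both IndexableField and StorableField, the same Field object can appear in both views.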
@@ -23,6 +23,7 @@ import java.util.HashSet;
 
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.StoredFieldVisitor;
 
 /** A {@link StoredFieldVisitor} that creates a {@link
@@ -35,7 +36,7 @@ import org.apache.lucene.index.StoredFieldVisitor;
  * @lucene.experimental */
 
 public class DocumentStoredFieldVisitor extends StoredFieldVisitor {
-  private final Document doc = new Document();
+  private final StoredDocument doc = new StoredDocument();
  private final Set<String> fieldsToAdd;
 
  /** Load only fields named in the provided <code>Set<String></code>. */
@@ -68,7 +69,7 @@ public class DocumentStoredFieldVisitor extends StoredFieldVisitor {
    ft.setIndexed(fieldInfo.isIndexed());
    ft.setOmitNorms(fieldInfo.omitsNorms());
    ft.setIndexOptions(fieldInfo.getIndexOptions());
-    doc.add(new Field(fieldInfo.name, value, ft));
+    doc.add(new StoredField(fieldInfo.name, value, ft));
  }
 
  @Override
@@ -98,12 +99,12 @@ public class DocumentStoredFieldVisitor extends StoredFieldVisitor {
 
  /**
   * Retrieve the visited document.
-   * @return Document populated with stored fields. Note that only
+   * @return {@link StoredDocument} populated with stored fields. Note that only
   *         the stored information in the field instances is valid,
-   *         data such as boosts, indexing options, term vector options,
+   *         data such as indexing options, term vector options,
   *         etc is not set.
   */
-  public Document getDocument() {
+  public StoredDocument getDocument() {
    return doc;
  }
 }
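For illustration only, a hedged sketch of using the visitor directly to load a subset of stored fields; reader and docID stand in for an open IndexReader and a matching document id:

    Set<String> fieldsToLoad = Collections.singleton("title");
    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
    reader.document(docID, visitor);       // feeds the stored fields to the visitor
    StoredDocument stored = visitor.getDocument();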
@@ -36,7 +36,7 @@ import org.apache.lucene.index.DocValues;
  * @see DocValues for further information
  * */
 
-public class DoubleDocValuesField extends Field {
+public class DoubleDocValuesField extends StoredField {
 
  /**
   * Type for 64-bit double DocValues.
@@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexWriter; // javadocs
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.index.Norm; // javadocs
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.index.FieldInvertState; // javadocs
 
@@ -58,12 +59,13 @@ import org.apache.lucene.index.FieldInvertState; // javadocs
  * Field it is used in. It is strongly recommended that no
  * changes be made after Field instantiation.
  */
-public class Field implements IndexableField {
+public class Field implements IndexableField, StorableField {
 
  /**
   * Field's type
   */
  protected final FieldType type;
 
  /**
   * Field's name
   */
@@ -27,7 +27,7 @@ import org.apache.lucene.util.NumericUtils;
 /**
  * Describes the properties of a field.
  */
 public class FieldType implements IndexableFieldType {
 
  /** Data type of the numeric value
   * @since 3.2
@@ -52,10 +52,10 @@ public class FieldType implements IndexableFieldType {
  private boolean storeTermVectorPayloads;
  private boolean omitNorms;
  private IndexOptions indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
-  private DocValues.Type docValueType;
  private NumericType numericType;
  private boolean frozen;
  private int numericPrecisionStep = NumericUtils.PRECISION_STEP_DEFAULT;
+  private DocValues.Type docValueType;
 
  /**
   * Create a new mutable FieldType with all of the properties from <code>ref</code>
@@ -299,29 +299,6 @@ public class FieldType implements IndexableFieldType {
    this.indexOptions = value;
  }
 
-  /**
-   * Set's the field's DocValues.Type
-   * @param type DocValues type, or null if no DocValues should be stored.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #docValueType()
-   */
-  public void setDocValueType(DocValues.Type type) {
-    checkIfFrozen();
-    docValueType = type;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>
-   * The default is <code>null</code> (no docValues)
-   * @see #setDocValueType(DocValues.Type)
-   */
-  @Override
-  public DocValues.Type docValueType() {
-    return docValueType;
-  }
-
  /**
   * Specifies the field's numeric type.
   * @param type numeric type, or null if the field has no numeric type.
@@ -423,4 +400,30 @@ public class FieldType implements IndexableFieldType {
 
    return result.toString();
  }
+
+  /* from StorableFieldType */
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * The default is <code>null</code> (no docValues)
+   * @see #setDocValueType(DocValues.Type)
+   */
+  @Override
+  public DocValues.Type docValueType() {
+    return docValueType;
+  }
+
+  /**
+   * Set's the field's DocValues.Type
+   * @param type DocValues type, or null if no DocValues should be stored.
+   * @throws IllegalStateException if this FieldType is frozen against
+   *         future modifications.
+   * @see #docValueType()
+   */
+  public void setDocValueType(DocValues.Type type) {
+    checkIfFrozen();
+    docValueType = type;
+    this.stored = true;
+  }
 }
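A sketch of the behavior after the move (illustrative values, not from the patch): giving a FieldType a DocValues type now also marks it stored, since the indexing chain pulls DocValues from the stored-field loop:

    FieldType ft = new FieldType();
    ft.setDocValueType(DocValues.Type.VAR_INTS); // also sets stored = true
    ft.freeze(); // later setDocValueType calls now throw IllegalStateException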
@@ -35,7 +35,7 @@ import org.apache.lucene.index.DocValues;
  * @see DocValues for further information
  * */
 
-public class FloatDocValuesField extends Field {
+public class FloatDocValuesField extends StoredField {
 
  /**
   * Type for 32-bit float DocValues.
@@ -35,7 +35,7 @@ import org.apache.lucene.index.DocValues;
  * @see DocValues for further information
  * */
 
-public class IntDocValuesField extends Field {
+public class IntDocValuesField extends StoredField {
 
  /**
   * Type for 32-bit integer DocValues.
@@ -35,7 +35,7 @@ import org.apache.lucene.index.DocValues;
  * @see DocValues for further information
  * */
 
-public class LongDocValuesField extends Field {
+public class LongDocValuesField extends StoredField {
 
  /**
   * Type for 64-bit long DocValues.
@@ -39,7 +39,7 @@ import org.apache.lucene.index.AtomicReader; // javadocs
  * @see DocValues for further information
  * */
 
-public class PackedLongDocValuesField extends Field {
+public class PackedLongDocValuesField extends StoredField {
 
  /**
   * Type for packed long DocValues.
@@ -47,6 +47,7 @@ public class PackedLongDocValuesField extends Field {
  public static final FieldType TYPE = new FieldType();
  static {
    TYPE.setDocValueType(DocValues.Type.VAR_INTS);
+    TYPE.setStored(true);
    TYPE.freeze();
  }
 
@@ -36,7 +36,7 @@ import org.apache.lucene.index.DocValues;
  * @see DocValues for further information
  * */
 
-public class ShortDocValuesField extends Field {
+public class ShortDocValuesField extends StoredField {
 
  /**
   * Type for 16-bit short DocValues.
@@ -37,7 +37,7 @@ import org.apache.lucene.util.BytesRef;
  * @see DocValues for further information
  * */
 
-public class SortedBytesDocValuesField extends Field {
+public class SortedBytesDocValuesField extends StoredField {
 
  // TODO: ideally indexer figures out var vs fixed on its own!?
  /**
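The net effect for callers, sketched with an invented field name: construction of the DocValues field classes is unchanged, but the fields now also travel the stored-field path:

    Document doc = new Document();
    doc.add(new PackedLongDocValuesField("views", 42L)); // stored + docValues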
@@ -1,6 +1,7 @@
 package org.apache.lucene.document;
 
 import org.apache.lucene.index.IndexReader; // javadocs
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.IndexSearcher; // javadocs
 import org.apache.lucene.util.BytesRef;
 
@@ -24,7 +25,7 @@ import org.apache.lucene.util.BytesRef;
 /** A field whose value is stored so that {@link
  * IndexSearcher#doc} and {@link IndexReader#document} will
  * return the field and its value. */
-public final class StoredField extends Field {
+public class StoredField extends Field {
 
  /**
   * Type for a stored-only field.
@@ -36,6 +37,31 @@ public final class StoredField extends Field {
    TYPE.freeze();
  }
 
+  /**
+   * Expert: allows you to customize the {@link
+   * FieldType}.
+   * @param name field name
+   * @param type custom {@link FieldType} for this field
+   * @throws IllegalArgumentException if the field name is null.
+   */
+  protected StoredField(String name, FieldType type) {
+    super(name, type);
+  }
+
+  /**
+   * Expert: allows you to customize the {@link
+   * FieldType}.
+   * <p>NOTE: the provided byte[] is not copied so be sure
+   * not to change it until you're done with this field.
+   * @param name field name
+   * @param bytes byte array pointing to binary content (not copied)
+   * @param type custom {@link FieldType} for this field
+   * @throws IllegalArgumentException if the field name is null.
+   */
+  public StoredField(String name, BytesRef bytes, FieldType type) {
+    super(name, bytes, type);
+  }
+
  /**
   * Create a stored-only field with the given binary value.
   * <p>NOTE: the provided byte[] is not copied so be sure
@@ -83,6 +109,18 @@ public final class StoredField extends Field {
  public StoredField(String name, String value) {
    super(name, value, TYPE);
  }
+
+  /**
+   * Expert: allows you to customize the {@link
+   * FieldType}.
+   * @param name field name
+   * @param value string value
+   * @param type custom {@link FieldType} for this field
+   * @throws IllegalArgumentException if the field name or value is null.
+   */
+  public StoredField(String name, String value, FieldType type) {
+    super(name, value, type);
+  }
 
  // TODO: not great but maybe not a big problem?
  /**
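A hedged sketch of the new expert constructors (identifiers invented for illustration):

    FieldType ft = new FieldType(StoredField.TYPE); // unfrozen copy of the stored-only type
    byte[] payload = new byte[] { 1, 2, 3 };        // note: not copied by StoredField
    StoredField blob = new StoredField("blob", new BytesRef(payload), ft);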
@@ -40,7 +40,7 @@ import org.apache.lucene.util.BytesRef;
  * @see DocValues for further information
  * */
 
-public class StraightBytesDocValuesField extends Field {
+public class StraightBytesDocValuesField extends StoredField {
 
  // TODO: ideally indexer figures out var vs fixed on its own!?
  /**
@@ -1180,7 +1180,7 @@ public class CheckIndex {
      for (int j = 0; j < info.info.getDocCount(); ++j) {
        // Intentionally pull even deleted documents to
        // make sure they too are not corrupt:
-        Document doc = reader.document(j);
+        StoredDocument doc = reader.document(j);
        if (liveDocs == null || liveDocs.get(j)) {
          status.docCount++;
          status.totFields += doc.getFields().size();
@@ -28,6 +28,7 @@ import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.FieldInfosWriter;
 import org.apache.lucene.codecs.PerDocConsumer;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocumentsWriterPerThread.DocState;
 import org.apache.lucene.index.TypePromoter.TypeCompatibility;
 import org.apache.lucene.store.IOContext;
@@ -218,58 +219,21 @@ final class DocFieldProcessor extends DocConsumer {
    // seen before (eg suddenly turning on norms or
    // vectors, etc.):
 
-    for(IndexableField field : docState.doc) {
+    for(IndexableField field : docState.doc.indexableFields()) {
      final String fieldName = field.name();
+      IndexableFieldType ft = field.fieldType();
 
-      // Make sure we have a PerField allocated
-      final int hashPos = fieldName.hashCode() & hashMask;
-      DocFieldProcessorPerField fp = fieldHash[hashPos];
-      while(fp != null && !fp.fieldInfo.name.equals(fieldName)) {
-        fp = fp.next;
-      }
-
-      if (fp == null) {
-
-        // TODO FI: we need to genericize the "flags" that a
-        // field holds, and, how these flags are merged; it
-        // needs to be more "pluggable" such that if I want
-        // to have a new "thing" my Fields can do, I can
-        // easily add it
-        FieldInfo fi = fieldInfos.addOrUpdate(fieldName, field.fieldType());
-
-        fp = new DocFieldProcessorPerField(this, fi);
-        fp.next = fieldHash[hashPos];
-        fieldHash[hashPos] = fp;
-        totalFieldCount++;
-
-        if (totalFieldCount >= fieldHash.length/2) {
-          rehash();
-        }
-      } else {
-        fieldInfos.addOrUpdate(fp.fieldInfo.name, field.fieldType());
-      }
-
-      if (thisFieldGen != fp.lastGen) {
-
-        // First time we're seeing this field for this doc
-        fp.fieldCount = 0;
-
-        if (fieldCount == fields.length) {
-          final int newSize = fields.length*2;
-          DocFieldProcessorPerField newArray[] = new DocFieldProcessorPerField[newSize];
-          System.arraycopy(fields, 0, newArray, 0, fieldCount);
-          fields = newArray;
-        }
-
-        fields[fieldCount++] = fp;
-        fp.lastGen = thisFieldGen;
-      }
+      DocFieldProcessorPerField fp = processField(fieldInfos, thisFieldGen, fieldName, ft);
 
      fp.addField(field);
+    }
 
-      if (field.fieldType().stored()) {
+    for (StorableField field: docState.doc.storableFields()) {
+      final String fieldName = field.name();
+      IndexableFieldType ft = field.fieldType();
+
+      DocFieldProcessorPerField fp = processField(fieldInfos, thisFieldGen, fieldName, ft);
      fieldsWriter.addField(field, fp.fieldInfo);
-      }
 
      final DocValues.Type dvType = field.fieldType().docValueType();
      if (dvType != null) {
        DocValuesConsumerHolder docValuesConsumer = docValuesConsumer(dvType,
@@ -313,6 +277,54 @@ final class DocFieldProcessor extends DocConsumer {
    }
  }
 
+  private DocFieldProcessorPerField processField(FieldInfos.Builder fieldInfos,
+      final int thisFieldGen, final String fieldName, IndexableFieldType ft) {
+    // Make sure we have a PerField allocated
+    final int hashPos = fieldName.hashCode() & hashMask;
+    DocFieldProcessorPerField fp = fieldHash[hashPos];
+    while(fp != null && !fp.fieldInfo.name.equals(fieldName)) {
+      fp = fp.next;
+    }
+
+    if (fp == null) {
+
+      // TODO FI: we need to genericize the "flags" that a
+      // field holds, and, how these flags are merged; it
+      // needs to be more "pluggable" such that if I want
+      // to have a new "thing" my Fields can do, I can
+      // easily add it
+      FieldInfo fi = fieldInfos.addOrUpdate(fieldName, ft);
+
+      fp = new DocFieldProcessorPerField(this, fi);
+      fp.next = fieldHash[hashPos];
+      fieldHash[hashPos] = fp;
+      totalFieldCount++;
+
+      if (totalFieldCount >= fieldHash.length/2) {
+        rehash();
+      }
+    } else {
+      fieldInfos.addOrUpdate(fp.fieldInfo.name, ft);
+    }
+
+    if (thisFieldGen != fp.lastGen) {
+
+      // First time we're seeing this field for this doc
+      fp.fieldCount = 0;
+
+      if (fieldCount == fields.length) {
+        final int newSize = fields.length*2;
+        DocFieldProcessorPerField newArray[] = new DocFieldProcessorPerField[newSize];
+        System.arraycopy(fields, 0, newArray, 0, fieldCount);
+        fields = newArray;
+      }
+
+      fields[fieldCount++] = fp;
+      fp.lastGen = thisFieldGen;
+    }
+    return fp;
+  }
+
  private static final Comparator<DocFieldProcessorPerField> fieldsComp = new Comparator<DocFieldProcessorPerField>() {
    public int compare(DocFieldProcessorPerField o1, DocFieldProcessorPerField o2) {
      return o1.fieldInfo.name.compareTo(o2.fieldInfo.name);
@@ -75,7 +75,7 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
    // TODO FI: this should be "genericized" to querying
    // consumer if it wants to see this particular field
    // tokenized.
-    if (fieldType.indexed() && doInvert) {
+    if (doInvert) {
      final boolean analyzed = fieldType.tokenized() && docState.analyzer != null;
 
      // if the field omits norms, the boost cannot be indexed.
@@ -321,7 +321,7 @@ final class DocumentsWriter {
    return maybeMerge;
  }
 
-  boolean updateDocuments(final Iterable<? extends Iterable<? extends IndexableField>> docs, final Analyzer analyzer,
+  boolean updateDocuments(final Iterable<? extends IndexDocument> docs, final Analyzer analyzer,
                          final Term delTerm) throws IOException {
    boolean maybeMerge = preUpdate();
 
@@ -352,7 +352,7 @@ final class DocumentsWriter {
    return postUpdate(flushingDWPT, maybeMerge);
  }
 
-  boolean updateDocument(final Iterable<? extends IndexableField> doc, final Analyzer analyzer,
+  boolean updateDocument(final IndexDocument doc, final Analyzer analyzer,
      final Term delTerm) throws IOException {
 
    boolean maybeMerge = preUpdate();
 
@@ -94,7 +94,7 @@ class DocumentsWriterPerThread {
    InfoStream infoStream;
    Similarity similarity;
    int docID;
-    Iterable<? extends IndexableField> doc;
+    IndexDocument doc;
    String maxTermPrefix;
 
    DocState(DocumentsWriterPerThread docWriter, InfoStream infoStream) {
@@ -225,7 +225,7 @@ class DocumentsWriterPerThread {
    return retval;
  }
 
-  public void updateDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer, Term delTerm) throws IOException {
+  public void updateDocument(IndexDocument doc, Analyzer analyzer, Term delTerm) throws IOException {
    assert writer.testPoint("DocumentsWriterPerThread addDocument start");
    assert deleteQueue != null;
    docState.doc = doc;
@@ -278,7 +278,7 @@ class DocumentsWriterPerThread {
    }
  }
 
-  public int updateDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer, Term delTerm) throws IOException {
+  public int updateDocuments(Iterable<? extends IndexDocument> docs, Analyzer analyzer, Term delTerm) throws IOException {
    assert writer.testPoint("DocumentsWriterPerThread addDocuments start");
    assert deleteQueue != null;
    docState.analyzer = analyzer;
@@ -290,7 +290,7 @@ class DocumentsWriterPerThread {
    }
    int docCount = 0;
    try {
-      for(Iterable<? extends IndexableField> doc : docs) {
+      for(IndexDocument doc : docs) {
        docState.doc = doc;
        docState.docID = numDocsInRAM;
        docCount++;
@@ -103,12 +103,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
 
  @Override
  boolean start(IndexableField[] fields, int count) {
-    for(int i=0;i<count;i++) {
-      if (fields[i].fieldType().indexed()) {
-        return true;
-      }
-    }
-    return false;
+    return true;
  }
 
  @Override
@@ -0,0 +1,33 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Represents a single field in lucene document.  Further generalizations
+ * are {@link IndexableField} and {@link StorableField} interfaces.
+ *
+ *  @lucene.experimental */
+
+public interface GeneralField {
+
+  /** Field name */
+  public String name();
+
+  /** {@link IndexableFieldType} describing the properties
+   * of this field. */
+  public IndexableFieldType fieldType();
+}
@@ -0,0 +1,31 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Elementary interface used for indexing an document.
+ * @lucene.internal
+ */
+public interface IndexDocument {
+
+  /** Obtains all indexable fields in document */
+  public Iterable<? extends IndexableField> indexableFields();
+
+  /** Obtains all storable fields in document */
+  public Iterable<? extends StorableField> storableFields();
+}
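Illustrative only (class name invented): any object that hands the writer these two views is indexable; Document is the standard implementation, and a single List<Field> can back both views because Field implements both sub-interfaces:

    class PrebuiltDoc implements IndexDocument {
      private final List<Field> fields = new ArrayList<Field>();
      @Override
      public Iterable<? extends IndexableField> indexableFields() { return fields; }
      @Override
      public Iterable<? extends StorableField> storableFields() { return fields; }
    }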
@@ -342,7 +342,7 @@ public abstract class IndexReader implements Closeable {
  // TODO: we need a separate StoredField, so that the
  // Document returned here contains that class not
  // IndexableField
-  public final Document document(int docID) throws IOException {
+  public final StoredDocument document(int docID) throws IOException {
    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
    document(docID, visitor);
    return visitor.getDocument();
@@ -353,8 +353,10 @@ public abstract class IndexReader implements Closeable {
   * fields.  Note that this is simply sugar for {@link
   * DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}.
   */
-  public final Document document(int docID, Set<String> fieldsToLoad) throws IOException {
-    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
+  public final StoredDocument document(int docID, Set<String> fieldsToLoad)
+      throws IOException {
+    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(
+        fieldsToLoad);
    document(docID, visitor);
    return visitor.getDocument();
  }
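In caller code the migration is usually just the declared return type; a sketch with invented identifiers:

    // before LUCENE-3312:
    // Document doc = reader.document(docID);
    StoredDocument doc = reader.document(docID);
    String title = doc.getField("title").stringValue();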
@@ -69,10 +69,10 @@ import org.apache.lucene.util.ThreadInterruptedException;
  new index if there is not already an index at the provided path
  and otherwise open the existing index.</p>
 
-  <p>In either case, documents are added with {@link #addDocument(Iterable)
+  <p>In either case, documents are added with {@link #addDocument(IndexDocument)
  addDocument} and removed with {@link #deleteDocuments(Term)} or {@link
  #deleteDocuments(Query)}. A document can be updated with {@link
-  #updateDocument(Term, Iterable) updateDocument} (which just deletes
+  #updateDocument(Term, IndexDocument) updateDocument} (which just deletes
  and then adds the entire document). When finished adding, deleting
  and updating documents, {@link #close() close} should be called.</p>
 
@@ -1099,7 +1099,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
-  public void addDocument(Iterable<? extends IndexableField> doc) throws IOException {
+  public void addDocument(IndexDocument doc) throws IOException {
    addDocument(doc, analyzer);
  }
 
@@ -1107,7 +1107,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * Adds a document to this index, using the provided analyzer instead of the
   * value of {@link #getAnalyzer()}.
   *
-   * <p>See {@link #addDocument(Iterable)} for details on
+   * <p>See {@link #addDocument(IndexDocument)} for details on
   * index and IndexWriter state after an Exception, and
   * flushing/merging temporary free space requirements.</p>
   *
@@ -1118,7 +1118,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
-  public void addDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer) throws IOException {
+  public void addDocument(IndexDocument doc, Analyzer analyzer) throws IOException {
    updateDocument(null, doc, analyzer);
  }
 
@@ -1143,7 +1143,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * perhaps to obtain better index compression), in which case
   * you may need to fully re-index your documents at that time.
   *
-   * <p>See {@link #addDocument(Iterable)} for details on
+   * <p>See {@link #addDocument(IndexDocument)} for details on
   * index and IndexWriter state after an Exception, and
   * flushing/merging temporary free space requirements.</p>
   *
@@ -1163,7 +1163,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   *
   * @lucene.experimental
   */
-  public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
+  public void addDocuments(Iterable<? extends IndexDocument> docs) throws IOException {
    addDocuments(docs, analyzer);
  }
 
@@ -1178,7 +1178,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   *
   * @lucene.experimental
   */
-  public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
+  public void addDocuments(Iterable<? extends IndexDocument> docs, Analyzer analyzer) throws IOException {
    updateDocuments(null, docs, analyzer);
  }
 
@@ -1195,7 +1195,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   *
   * @lucene.experimental
   */
-  public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
+  public void updateDocuments(Term delTerm, Iterable<? extends IndexDocument> docs) throws IOException {
    updateDocuments(delTerm, docs, analyzer);
  }
 
@@ -1213,7 +1213,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   *
   * @lucene.experimental
   */
-  public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
+  public void updateDocuments(Term delTerm, Iterable<? extends IndexDocument> docs, Analyzer analyzer) throws IOException {
    ensureOpen();
    try {
      boolean success = false;
@@ -1410,7 +1410,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
-  public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
+  public void updateDocument(Term term, IndexDocument doc) throws IOException {
    ensureOpen();
    updateDocument(term, doc, getAnalyzer());
  }
@@ -1433,7 +1433,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
-  public void updateDocument(Term term, Iterable<? extends IndexableField> doc, Analyzer analyzer)
+  public void updateDocument(Term term, IndexDocument doc, Analyzer analyzer)
      throws IOException {
    ensureOpen();
    try {
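Existing call sites keep compiling because Document implements the new IndexDocument interface; a hedged sketch, with the configuration details (version constant, analyzer, directory) assumed rather than taken from the patch:

    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_50, analyzer);
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new TextField("body", "some text", Field.Store.YES));
    writer.addDocument(doc); // Document is an IndexDocument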
@@ -35,15 +35,19 @@ import org.apache.lucene.util.BytesRef;
 *
 *  @lucene.experimental */
 
-public interface IndexableField {
+public interface IndexableField extends GeneralField {
 
-  /** Field name */
-  public String name();
+  /**
+   * Creates the TokenStream used for indexing this field.  If appropriate,
+   * implementations should use the given Analyzer to create the TokenStreams.
+   *
+   * @param analyzer Analyzer that should be used to create the TokenStreams from
+   * @return TokenStream value for indexing the document.  Should always return
+   *         a non-null value if the field is to be indexed
+   * @throws IOException Can be thrown while creating the TokenStream
+   */
+  public TokenStream tokenStream(Analyzer analyzer) throws IOException;
 
-  /** {@link IndexableFieldType} describing the properties
-   * of this field. */
-  public IndexableFieldType fieldType();
-
  /**
   * Returns the field's index-time boost.
   * <p>
@@ -65,27 +69,4 @@ public interface IndexableField {
   * @see DefaultSimilarity#encodeNormValue(float)
   */
  public float boost();
-
-  /** Non-null if this field has a binary value */
-  public BytesRef binaryValue();
-
-  /** Non-null if this field has a string value */
-  public String stringValue();
-
-  /** Non-null if this field has a Reader value */
-  public Reader readerValue();
-
-  /** Non-null if this field has a numeric value */
-  public Number numericValue();
-
-  /**
-   * Creates the TokenStream used for indexing this field.  If appropriate,
-   * implementations should use the given Analyzer to create the TokenStreams.
-   *
-   * @param analyzer Analyzer that should be used to create the TokenStreams from
-   * @return TokenStream value for indexing the document.  Should always return
-   *         a non-null value if the field is to be indexed
-   * @throws IOException Can be thrown while creating the TokenStream
-   */
-  public TokenStream tokenStream(Analyzer analyzer) throws IOException;
 }
@@ -31,7 +31,7 @@ public interface IndexableFieldType {
 
  /** True if the field's value should be stored */
  public boolean stored();
 
  /**
   * True if this field's value should be analyzed by the
   * {@link Analyzer}.
@@ -95,5 +95,5 @@ public interface IndexableFieldType {
   * DocValues {@link DocValues.Type}: if non-null then the field's value
   * will be indexed into docValues.
   */
  public DocValues.Type docValueType();
 }
@@ -26,13 +26,14 @@ import org.apache.lucene.document.LongDocValuesField;
 import org.apache.lucene.document.PackedLongDocValuesField;
 import org.apache.lucene.document.ShortDocValuesField;
 import org.apache.lucene.document.SortedBytesDocValuesField;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StraightBytesDocValuesField;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.BytesRef;
 
 /**
- * Stores the normalization value computed in
+ * Stores the normalization value with {@link StorableField} computed in
  * {@link Similarity#computeNorm(FieldInvertState, Norm)} per field.
  * Normalization values must be consistent within a single field, different
  * value types are not permitted within a single field. All values set must be
@@ -43,13 +44,13 @@ import org.apache.lucene.util.BytesRef;
  * @lucene.internal
  */
 public final class Norm {
-  private Field field;
+  private StoredField field;
  private BytesRef spare;
 
  /**
-   * Returns the {@link IndexableField} representation for this norm
+   * Returns the {@link StorableField} representation for this norm
   */
-  public IndexableField field() {
+  public StorableField field() {
    return field;
  }
 
@@ -51,7 +51,7 @@ final class NormsConsumerPerField extends InvertedDocEndConsumerPerField impleme
    similarity.computeNorm(fieldState, norm);
 
    if (norm.type() != null) {
-      IndexableField field = norm.field();
+      StorableField field = norm.field();
      // some similarity might not compute any norms
      DocValuesConsumer consumer = getConsumer(norm.type());
      consumer.add(docState.docID, field);
@@ -26,6 +26,7 @@ import java.util.Map.Entry;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Version;
@@ -67,13 +68,13 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy {
    int numDocs = r.numDocs();
    // index is allowed to have exactly one document or 0.
    if (numDocs == 1) {
-      Document doc = r.document(r.maxDoc() - 1);
+      StoredDocument doc = r.document(r.maxDoc() - 1);
      if (doc.getField(SNAPSHOTS_ID) == null) {
        throw new IllegalStateException("directory is not a valid snapshots store!");
      }
-      doc.removeField(SNAPSHOTS_ID);
-      for (IndexableField f : doc) {
+      for (StorableField f : doc) {
+        if (!f.name().equals(SNAPSHOTS_ID))
        snapshots.put(f.name(), f.stringValue());
      }
    } else if (numDocs != 0) {
      throw new IllegalStateException(
@@ -184,14 +185,12 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy {
  private void persistSnapshotInfos(String id, String segment) throws IOException {
    writer.deleteAll();
    Document d = new Document();
-    FieldType ft = new FieldType();
-    ft.setStored(true);
-    d.add(new Field(SNAPSHOTS_ID, "", ft));
+    d.add(new StoredField(SNAPSHOTS_ID, ""));
    for (Entry<String, String> e : super.getSnapshots().entrySet()) {
-      d.add(new Field(e.getKey(), e.getValue(), ft));
+      d.add(new StoredField(e.getKey(), e.getValue()));
    }
    if (id != null) {
-      d.add(new Field(id, segment, ft));
+      d.add(new StoredField(id, segment));
    }
    writer.addDocument(d);
    writer.commit();
@@ -0,0 +1,42 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Reader;
+
+import org.apache.lucene.util.BytesRef;
+
+/** Represents a single stored field in lucene document. These fields
+ * are contained in document retrieved from IndexReader.
+ *
+ *  @lucene.experimental */
+
+public interface StorableField extends GeneralField {
+
+  /** Non-null if this field has a binary value */
+  public BytesRef binaryValue();
+
+  /** Non-null if this field has a string value */
+  public String stringValue();
+
+  /** Non-null if this field has a Reader value */
+  public Reader readerValue();
+
+  /** Non-null if this field has a numeric value */
+  public Number numericValue();
+}
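A sketch of consuming stored values generically through the new interface; storedDoc stands in for a StoredDocument, which iterates its StorableFields:

    for (StorableField f : storedDoc) {
      if (f.binaryValue() != null) {
        BytesRef bytes = f.binaryValue();   // binary stored value
      } else if (f.numericValue() != null) {
        Number n = f.numericValue();        // numeric stored value
      } else if (f.stringValue() != null) {
        String s = f.stringValue();         // string stored value
      }
    }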
@@ -0,0 +1,31 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO: Move some properties from IndexableFieldType here, those regarding stored fields.
+
+/**
+ * Describes the properties of a stored field.
+ * @lucene.experimental
+ */
+public interface StorableFieldType {
+
+  /** DocValues type; if non-null then the field's value
+   *  will be indexed into docValues */
+  public DocValues.Type docValueType();
+}
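A hypothetical implementation, just to show the contract; the class is not part of the patch, and the enum constant is one of the DocValues.Type values in the 4.x line at this point:

    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.StorableFieldType;

    // Hypothetical field type that opts its values into fixed-sorted-bytes doc
    // values; returning null from docValueType() would mean "no doc values".
    public class SortedBytesFieldType implements StorableFieldType {
      @Override
      public DocValues.Type docValueType() {
        return DocValues.Type.BYTES_FIXED_SORTED;
      }
    }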
@@ -0,0 +1,191 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A StoredDocument is retrieved from IndexReader and contains only the stored
+ * fields of an indexed {@link IndexDocument}.
+ */
+public class StoredDocument implements Iterable<StorableField> {
+
+  private final List<StorableField> fields = new ArrayList<StorableField>();
+
+  public final void add(StorableField field) {
+    fields.add(field);
+  }
+
+  public StorableField[] getFields(String name) {
+    List<StorableField> result = new ArrayList<StorableField>();
+    for (StorableField field : fields) {
+      if (field.name().equals(name)) {
+        result.add(field);
+      }
+    }
+
+    return result.toArray(new StorableField[result.size()]);
+  }
+
+  /** Returns a field with the given name if any exist in this document, or
+   * null.  If multiple fields exist with this name, this method returns the
+   * first value added.
+   */
+  public final StorableField getField(String name) {
+    for (StorableField field : fields) {
+      if (field.name().equals(name)) {
+        return field;
+      }
+    }
+    return null;
+  }
+
+  /** Returns a List of all the fields in a document.
+   * <p>Note that fields which are <i>not</i> stored are
+   * <i>not</i> available in documents retrieved from the
+   * index, e.g. {@link IndexSearcher#doc(int)} or {@link
+   * IndexReader#document(int)}.
+   *
+   * @return an immutable <code>List[StorableField]</code>
+   */
+  public final List<StorableField> getFields() {
+    return fields;
+  }
+
+  @Override
+  public Iterator<StorableField> iterator() {
+    return this.fields.iterator();
+  }
+
+  /**
+   * Returns an array of byte arrays for all of the fields that have the name
+   * specified as the method parameter.  This method returns an empty
+   * array when there are no matching fields.  It never
+   * returns null.
+   *
+   * @param name the name of the field
+   * @return a <code>byte[][]</code> of binary field values
+   */
+  public final BytesRef[] getBinaryValues(String name) {
+    final List<BytesRef> result = new ArrayList<BytesRef>();
+    for (StorableField field : fields) {
+      if (field.name().equals(name)) {
+        final BytesRef bytes = field.binaryValue();
+        if (bytes != null) {
+          result.add(bytes);
+        }
+      }
+    }
+
+    return result.toArray(new BytesRef[result.size()]);
+  }
+
+  /**
+   * Returns an array of bytes for the first (or only) field that has the name
+   * specified as the method parameter.  This method will return <code>null</code>
+   * if no binary fields with the specified name are available.
+   * There may be non-binary fields with the same name.
+   *
+   * @param name the name of the field.
+   * @return a <code>byte[]</code> containing the binary field value or <code>null</code>
+   */
+  public final BytesRef getBinaryValue(String name) {
+    for (StorableField field : fields) {
+      if (field.name().equals(name)) {
+        final BytesRef bytes = field.binaryValue();
+        if (bytes != null) {
+          return bytes;
+        }
+      }
+    }
+    return null;
+  }
+
+  private final static String[] NO_STRINGS = new String[0];
+
+  /**
+   * Returns an array of values of the field specified as the method parameter.
+   * This method returns an empty array when there are no
+   * matching fields.  It never returns null.
+   * For {@link IntField}, {@link LongField}, {@link
+   * FloatField} and {@link DoubleField} it returns the string value of the number.
+   * If you want the actual numeric field instances back, use {@link #getFields}.
+   * @param name the name of the field
+   * @return a <code>String[]</code> of field values
+   */
+  public final String[] getValues(String name) {
+    List<String> result = new ArrayList<String>();
+    for (StorableField field : fields) {
+      if (field.name().equals(name) && field.stringValue() != null) {
+        result.add(field.stringValue());
+      }
+    }
+
+    if (result.size() == 0) {
+      return NO_STRINGS;
+    }
+
+    return result.toArray(new String[result.size()]);
+  }
+
+  /** Returns the string value of the field with the given name if any exist in
+   * this document, or null.  If multiple fields exist with this name, this
+   * method returns the first value added.  If only binary fields with this name
+   * exist, returns null.
+   * For {@link IntField}, {@link LongField}, {@link
+   * FloatField} and {@link DoubleField} it returns the string value of the number.
+   * If you want the actual numeric field instance back, use {@link #getField}.
+   */
+  public final String get(String name) {
+    for (StorableField field : fields) {
+      if (field.name().equals(name) && field.stringValue() != null) {
+        return field.stringValue();
+      }
+    }
+    return null;
+  }
+
+  /** Prints the fields of a document for human consumption. */
+  @Override
+  public final String toString() {
+    StringBuilder buffer = new StringBuilder();
+    buffer.append("StoredDocument<");
+    for (int i = 0; i < fields.size(); i++) {
+      StorableField field = fields.get(i);
+      buffer.append(field.toString());
+      if (i != fields.size()-1)
+        buffer.append(" ");
+    }
+    buffer.append(">");
+    return buffer.toString();
+  }
+}
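A usage sketch for the class above (the directory handling and field name are placeholders); because StoredDocument is Iterable, its stored fields can be walked directly:

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.StorableField;
    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.store.Directory;

    // Sketch: fetch one document and dump its stored fields.
    public class StoredDocumentSketch {
      public static void dump(Directory dir, int docID) throws Exception {
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
          StoredDocument doc = reader.document(docID);
          for (StorableField field : doc) {           // Iterable<StorableField>
            System.out.println(field.name() + " = " + field.stringValue());
          }
        } finally {
          reader.close();
        }
      }
    }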
@@ -44,12 +44,12 @@ final class StoredFieldsConsumer {
     }
   }

   private int numStoredFields;
-  private IndexableField[] storedFields;
+  private StorableField[] storedFields;
   private FieldInfo[] fieldInfos;

   public void reset() {
     numStoredFields = 0;
-    storedFields = new IndexableField[1];
+    storedFields = new StorableField[1];
     fieldInfos = new FieldInfo[1];
   }

@@ -126,10 +126,10 @@ final class StoredFieldsConsumer {
     assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end");
   }

-  public void addField(IndexableField field, FieldInfo fieldInfo) {
+  public void addField(StorableField field, FieldInfo fieldInfo) {
     if (numStoredFields == storedFields.length) {
       int newSize = ArrayUtil.oversize(numStoredFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
-      IndexableField[] newArray = new IndexableField[newSize];
+      StorableField[] newArray = new StorableField[newSize];
       System.arraycopy(storedFields, 0, newArray, 0, numStoredFields);
       storedFields = newArray;
@@ -39,6 +39,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;

@@ -181,7 +182,7 @@ public class IndexSearcher {
   }

   /** Sugar for <code>.getIndexReader().document(docID)</code> */
-  public Document doc(int docID) throws IOException {
+  public StoredDocument doc(int docID) throws IOException {
     return reader.document(docID);
   }

@@ -191,7 +192,7 @@ public class IndexSearcher {
   }

   /** Sugar for <code>.getIndexReader().document(docID, fieldsToLoad)</code> */
-  public final Document document(int docID, Set<String> fieldsToLoad) throws IOException {
+  public final StoredDocument document(int docID, Set<String> fieldsToLoad) throws IOException {
     return reader.document(docID, fieldsToLoad);
   }
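A caller-side sketch of the migration this enables; per MIGRATE.txt, usually only the declared type of the hit changes (the query and field name below are illustrative):

    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.ScoreDoc;

    // Sketch: retrieving hits under the new API.
    public class DocMigrationSketch {
      public static void printIds(IndexSearcher searcher) throws Exception {
        ScoreDoc[] hits = searcher.search(new MatchAllDocsQuery(), 10).scoreDocs;
        for (ScoreDoc hit : hits) {
          StoredDocument d = searcher.doc(hit.doc); // was: Document d = searcher.doc(...)
          System.out.println(d.get("id"));
        }
      }
    }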
@@ -27,6 +27,7 @@ import java.util.concurrent.locks.ReentrantLock;

 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexDocument;
 import org.apache.lucene.index.SegmentInfoPerCommit;
 import org.apache.lucene.index.IndexReader; // javadocs
 import org.apache.lucene.index.IndexWriter;

@@ -154,25 +155,25 @@ public class NRTManager extends ReferenceManager<IndexSearcher> {
     this.writer = writer;
   }

-  public long updateDocument(Term t, Iterable<? extends IndexableField> d, Analyzer a) throws IOException {
+  public long updateDocument(Term t, IndexDocument d, Analyzer a) throws IOException {
     writer.updateDocument(t, d, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }

-  public long updateDocument(Term t, Iterable<? extends IndexableField> d) throws IOException {
+  public long updateDocument(Term t, IndexDocument d) throws IOException {
     writer.updateDocument(t, d);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }

-  public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer a) throws IOException {
+  public long updateDocuments(Term t, Iterable<? extends IndexDocument> docs, Analyzer a) throws IOException {
     writer.updateDocuments(t, docs, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }

-  public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
+  public long updateDocuments(Term t, Iterable<? extends IndexDocument> docs) throws IOException {
     writer.updateDocuments(t, docs);
     // Return gen as of when indexing finished:
     return indexingGen.get();

@@ -208,25 +209,25 @@ public class NRTManager extends ReferenceManager<IndexSearcher> {
     return indexingGen.get();
   }

-  public long addDocument(Iterable<? extends IndexableField> d, Analyzer a) throws IOException {
+  public long addDocument(IndexDocument d, Analyzer a) throws IOException {
     writer.addDocument(d, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }

-  public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer a) throws IOException {
+  public long addDocuments(Iterable<? extends IndexDocument> docs, Analyzer a) throws IOException {
     writer.addDocuments(docs, a);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }

-  public long addDocument(Iterable<? extends IndexableField> d) throws IOException {
+  public long addDocument(IndexDocument d) throws IOException {
     writer.addDocument(d);
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }

-  public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
+  public long addDocuments(Iterable<? extends IndexDocument> docs) throws IOException {
     writer.addDocuments(docs);
     // Return gen as of when indexing finished:
     return indexingGen.get();
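A hedged sketch of the generation contract above: callers keep the returned gen and pass it to waitForGeneration to block until a reopened searcher covers that update. The constructor arguments and the waitForGeneration call reflect the NRTManager API as of this commit and are assumptions here, as is the presence of a separate reopen thread.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.search.NRTManager;
    import org.apache.lucene.search.SearcherFactory;

    // Sketch only: index one document and wait until it is visible to searches.
    // Assumes an NRTManagerReopenThread (or similar) is periodically refreshing mgr.
    public class NRTSketch {
      public static void indexAndWait(IndexWriter writer, Document doc) throws Exception {
        NRTManager mgr = new NRTManager(writer, new SearcherFactory());
        long gen = mgr.addDocument(doc); // gen as of when indexing finished
        mgr.waitForGeneration(gen);      // assumed API: block until gen is searchable
        mgr.close();
      }
    }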
@@ -178,4 +178,4 @@ abstract class BulkOperation implements PackedInts.Decoder, PackedInts.Encoder {
       return iterations;
     }
   }
-}
+}

@@ -581,4 +581,4 @@ abstract class Packed64SingleBlock extends PackedInts.MutableImpl {

   }

-}
+}
@@ -130,7 +130,7 @@ adding

 <li>
 Create an {@link org.apache.lucene.index.IndexWriter IndexWriter}
-and add documents to it with {@link org.apache.lucene.index.IndexWriter#addDocument(Iterable) addDocument()};</li>
+and add documents to it with {@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.index.IndexDocument) addDocument()};</li>

 <li>
 Call <a href="../queryparser/org/apache/lucene/queryparser/classic/QueryParserBase.html#parse(java.lang.String)">QueryParser.parse()</a>
@@ -25,6 +25,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.*;

@@ -64,7 +65,7 @@ public class TestDemo extends LuceneTestCase {
     assertEquals(1, hits.totalHits);
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+      StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
       assertEquals(text, hitDoc.get("fieldname"));
     }
@@ -110,7 +110,7 @@ public class TestSearch extends LuceneTestCase {

       out.println(hits.length + " total results");
       for (int i = 0 ; i < hits.length && i < 10; i++) {
-        Document d = searcher.doc(hits[i].doc);
+        StoredDocument d = searcher.doc(hits[i].doc);
         out.println(i + " " + hits[i].score + " " + d.get("contents"));
       }
     }
@@ -127,7 +127,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
     out.println(hits.length + " total results\n");
     for (int i = 0 ; i < hits.length; i++) {
       if ( i < 10 || (i > 94 && i < 105) ) {
-        Document d = searcher.doc(hits[i].doc);
+        StoredDocument d = searcher.doc(hits[i].doc);
         out.println(i + " " + d.get(ID_FIELD));
       }
     }

@@ -137,7 +137,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
     assertEquals("total results", expectedCount, hits.length);
     for (int i = 0 ; i < hits.length; i++) {
       if (i < 10 || (i > 94 && i < 105) ) {
-        Document d = searcher.doc(hits[i].doc);
+        StoredDocument d = searcher.doc(hits[i].doc);
         assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
       }
     }
@@ -32,6 +32,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.index.TermsEnum;

@@ -128,7 +129,7 @@ public class TestAppendingCodec extends LuceneTestCase {
     writer.close();
     IndexReader reader = DirectoryReader.open(dir, 1);
     assertEquals(2, reader.numDocs());
-    Document doc2 = reader.document(0);
+    StoredDocument doc2 = reader.document(0);
     assertEquals(text, doc2.get("f"));
     Fields fields = MultiFields.getFields(reader);
     Terms terms = fields.terms("f");
@@ -28,12 +28,14 @@ import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.lucene40.values.Bytes;
 import org.apache.lucene.codecs.lucene40.values.Floats;
 import org.apache.lucene.codecs.lucene40.values.Ints;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValues.SortedSource;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.index.StorableField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Counter;

@@ -438,21 +440,11 @@ public class TestDocValues extends LuceneTestCase {
     return getSource(values).asSortedSource();
   }

-  public static class DocValueHolder implements IndexableField {
+  public static class DocValueHolder implements StorableField {
     BytesRef bytes;
     Number numberValue;
     Comparator<BytesRef> comp;

-    @Override
-    public TokenStream tokenStream(Analyzer a) {
-      return null;
-    }
-
-    @Override
-    public float boost() {
-      return 0.0f;
-    }
-
     @Override
     public String name() {
       return "test";

@@ -479,7 +471,7 @@ public class TestDocValues extends LuceneTestCase {
     }

     @Override
-    public IndexableFieldType fieldType() {
+    public FieldType fieldType() {
       return null;
     }
   }
@@ -3,6 +3,7 @@ package org.apache.lucene.document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;

@@ -37,8 +38,8 @@ public class TestBinaryDocument extends LuceneTestCase {
     {
       FieldType ft = new FieldType();
       ft.setStored(true);
-      IndexableField binaryFldStored = new StoredField("binaryStored", binaryValStored.getBytes("UTF-8"));
-      IndexableField stringFldStored = new Field("stringStored", binaryValStored, ft);
+      StoredField binaryFldStored = new StoredField("binaryStored", binaryValStored.getBytes("UTF-8"));
+      Field stringFldStored = new Field("stringStored", binaryValStored, ft);

       Document doc = new Document();

@@ -56,7 +57,7 @@ public class TestBinaryDocument extends LuceneTestCase {

     /** open a reader and fetch the document */
     IndexReader reader = writer.getReader();
-    Document docFromReader = reader.document(0);
+    StoredDocument docFromReader = reader.document(0);
     assertTrue(docFromReader != null);

     /** fetch the binary stored field and compare its content with the original one */

@@ -75,8 +76,8 @@ public class TestBinaryDocument extends LuceneTestCase {
   }

   public void testCompressionTools() throws Exception {
-    IndexableField binaryFldCompressed = new StoredField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes("UTF-8")));
-    IndexableField stringFldCompressed = new StoredField("stringCompressed", CompressionTools.compressString(binaryValCompressed));
+    StoredField binaryFldCompressed = new StoredField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes("UTF-8")));
+    StoredField stringFldCompressed = new StoredField("stringCompressed", CompressionTools.compressString(binaryValCompressed));

     Document doc = new Document();

@@ -90,7 +91,7 @@ public class TestBinaryDocument extends LuceneTestCase {

     /** open a reader and fetch the document */
     IndexReader reader = writer.getReader();
-    Document docFromReader = reader.document(0);
+    StoredDocument docFromReader = reader.document(0);
     assertTrue(docFromReader != null);

     /** fetch the binary compressed field and compare its content with the original one */
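The compression round trip this test exercises, sketched with the byte[]-based CompressionTools methods; the helper class and field name are illustrative:

    import org.apache.lucene.document.CompressionTools;
    import org.apache.lucene.document.StoredField;

    // Sketch: compress a string into a stored-only binary field, and restore it.
    public class CompressionSketch {
      public static StoredField compressedField(String text) {
        return new StoredField("stringCompressed", CompressionTools.compressString(text));
      }

      public static String restore(byte[] storedBytes) throws Exception {
        return CompressionTools.decompressString(storedBytes);
      }
    }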
@@ -18,6 +18,7 @@ package org.apache.lucene.document;
  */

 import java.io.StringReader;
+import java.util.List;

 import org.apache.lucene.analysis.EmptyTokenizer;
 import org.apache.lucene.analysis.MockAnalyzer;

@@ -27,6 +28,8 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;

@@ -50,9 +53,9 @@ public class TestDocument extends LuceneTestCase {

     FieldType ft = new FieldType();
     ft.setStored(true);
-    IndexableField stringFld = new Field("string", binaryVal, ft);
-    IndexableField binaryFld = new StoredField("binary", binaryVal.getBytes("UTF-8"));
-    IndexableField binaryFld2 = new StoredField("binary", binaryVal2.getBytes("UTF-8"));
+    Field stringFld = new Field("string", binaryVal, ft);
+    StoredField binaryFld = new StoredField("binary", binaryVal.getBytes("UTF-8"));
+    StoredField binaryFld2 = new StoredField("binary", binaryVal2.getBytes("UTF-8"));

     doc.add(stringFld);
     doc.add(binaryFld);

@@ -124,7 +127,7 @@ public class TestDocument extends LuceneTestCase {
     // silently ignored
     assertEquals(0, doc.getFields().size());
   }

   public void testConstructorExceptions() {
     FieldType ft = new FieldType();
     ft.setStored(true);

@@ -147,6 +150,34 @@ public class TestDocument extends LuceneTestCase {
       // expected exception
     }
   }
+
+  public void testClearDocument() {
+    Document doc = makeDocumentWithFields();
+    assertEquals(8, doc.getFields().size());
+    doc.clear();
+    assertEquals(0, doc.getFields().size());
+  }
+
+  public void testGetFieldsImmutable() {
+    Document doc = makeDocumentWithFields();
+    assertEquals(8, doc.getFields().size());
+    List<Field> fields = doc.getFields();
+    try {
+      fields.add( new StringField("name", "value", Field.Store.NO) );
+      fail("Document.getFields() should return immutable List");
+    }
+    catch (UnsupportedOperationException e) {
+      // OK
+    }
+
+    try {
+      fields.clear();
+      fail("Document.getFields() should return immutable List");
+    }
+    catch (UnsupportedOperationException e) {
+      // OK
+    }
+  }

   /**
    * Tests {@link Document#getValues(String)} method for a brand new Document

@@ -179,7 +210,7 @@ public class TestDocument extends LuceneTestCase {
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals(1, hits.length);

-    doAssert(searcher.doc(hits[0].doc), true);
+    doAssert(searcher.doc(hits[0].doc));
     writer.close();
     reader.close();
     dir.close();

@@ -214,11 +245,14 @@ public class TestDocument extends LuceneTestCase {
     return doc;
   }

+  private void doAssert(StoredDocument doc) {
+    doAssert(new Document(doc), true);
+  }
   private void doAssert(Document doc, boolean fromIndex) {
-    IndexableField[] keywordFieldValues = doc.getFields("keyword");
-    IndexableField[] textFieldValues = doc.getFields("text");
-    IndexableField[] unindexedFieldValues = doc.getFields("unindexed");
-    IndexableField[] unstoredFieldValues = doc.getFields("unstored");
+    StorableField[] keywordFieldValues = doc.getFields("keyword");
+    StorableField[] textFieldValues = doc.getFields("text");
+    StorableField[] unindexedFieldValues = doc.getFields("unindexed");
+    StorableField[] unstoredFieldValues = doc.getFields("unstored");

     assertTrue(keywordFieldValues.length == 2);
     assertTrue(textFieldValues.length == 2);

@@ -268,7 +302,7 @@ public class TestDocument extends LuceneTestCase {
     assertEquals(3, hits.length);
     int result = 0;
     for (int i = 0; i < 3; i++) {
-      Document doc2 = searcher.doc(hits[i].doc);
+      StoredDocument doc2 = searcher.doc(hits[i].doc);
       Field f = (Field) doc2.getField("id");
       if (f.stringValue().equals("id1")) result |= 1;
       else if (f.stringValue().equals("id2")) result |= 2;
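The new doAssert overload above leans on a Document constructor that copies a StoredDocument's fields back into an editable Document; a sketch of that bridge (the helper class is hypothetical):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.search.IndexSearcher;

    // Sketch: turn a retrieved hit back into a mutable Document, e.g. before
    // tweaking fields and re-adding it to an IndexWriter.
    public class EditableCopySketch {
      public static Document editableCopy(IndexSearcher searcher, int docID) throws Exception {
        StoredDocument stored = searcher.doc(docID);
        return new Document(stored);
      }
    }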
@@ -1247,7 +1247,7 @@ public class TestAddIndexes extends LuceneTestCase {
     w.close();
     assertEquals(2, r3.numDocs());
     for(int docID=0;docID<2;docID++) {
-      Document d = r3.document(docID);
+      StoredDocument d = r3.document(docID);
       if (d.get("id").equals("1")) {
         assertEquals("doc1 field1", d.get("f1"));
       } else {
@@ -329,13 +329,13 @@ public class TestBackwardsCompatibility extends LuceneTestCase {

     for(int i=0;i<35;i++) {
       if (liveDocs.get(i)) {
-        Document d = reader.document(i);
-        List<IndexableField> fields = d.getFields();
+        StoredDocument d = reader.document(i);
+        List<StorableField> fields = d.getFields();
         boolean isProxDoc = d.getField("content3") == null;
         if (isProxDoc) {
           final int numFields = is40Index ? 7 : 5;
           assertEquals(numFields, fields.size());
-          IndexableField f = d.getField("id");
+          StorableField f = d.getField("id");
           assertEquals(""+i, f.stringValue());

           f = d.getField("utf8");

@@ -406,7 +406,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term(new String("content"), "aaa")), null, 1000).scoreDocs;

     // First document should be #0
-    Document d = searcher.getIndexReader().document(hits[0].doc);
+    StoredDocument d = searcher.getIndexReader().document(hits[0].doc);
     assertEquals("didn't get the right document first", "0", d.get("id"));

     doTestHits(hits, 34, searcher.getIndexReader());

@@ -459,7 +459,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = new IndexSearcher(reader);
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
-    Document d = searcher.getIndexReader().document(hits[0].doc);
+    StoredDocument d = searcher.getIndexReader().document(hits[0].doc);
     assertEquals("wrong first document", "0", d.get("id"));
     doTestHits(hits, 44, searcher.getIndexReader());
     reader.close();

@@ -485,7 +485,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     IndexSearcher searcher = new IndexSearcher(reader);
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
     assertEquals("wrong number of hits", 34, hits.length);
-    Document d = searcher.doc(hits[0].doc);
+    StoredDocument d = searcher.doc(hits[0].doc);
     assertEquals("wrong first document", "0", d.get("id"));
     reader.close();

@@ -757,7 +757,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     for (int id=10; id<15; id++) {
       ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
       assertEquals("wrong number of hits", 1, hits.length);
-      Document d = searcher.doc(hits[0].doc);
+      StoredDocument d = searcher.doc(hits[0].doc);
       assertEquals(String.valueOf(id), d.get("id"));

       hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
@@ -74,7 +74,7 @@ public class TestCustomNorms extends LuceneTestCase {
     assertEquals(Type.FLOAT_32, normValues.getType());
     float[] norms = (float[]) source.getArray();
     for (int i = 0; i < open.maxDoc(); i++) {
-      Document document = open.document(i);
+      StoredDocument document = open.document(i);
       float expected = Float.parseFloat(document.get(floatTestField));
       assertEquals(expected, norms[i], 0.0f);
     }
@@ -61,10 +61,10 @@ public class TestDirectoryReader extends LuceneTestCase {
     assertTrue(reader != null);
     assertTrue(reader instanceof StandardDirectoryReader);

-    Document newDoc1 = reader.document(0);
+    StoredDocument newDoc1 = reader.document(0);
     assertTrue(newDoc1 != null);
     assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
-    Document newDoc2 = reader.document(1);
+    StoredDocument newDoc2 = reader.document(1);
     assertTrue(newDoc2 != null);
     assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
     Terms vector = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);

@@ -386,11 +386,11 @@ void assertTermDocsCount(String msg,
     writer.addDocument(doc);
     writer.close();
     DirectoryReader reader = DirectoryReader.open(dir);
-    Document doc2 = reader.document(reader.maxDoc() - 1);
-    IndexableField[] fields = doc2.getFields("bin1");
+    StoredDocument doc2 = reader.document(reader.maxDoc() - 1);
+    StorableField[] fields = doc2.getFields("bin1");
     assertNotNull(fields);
     assertEquals(1, fields.length);
-    IndexableField b1 = fields[0];
+    StorableField b1 = fields[0];
     assertTrue(b1.binaryValue() != null);
     BytesRef bytesRef = b1.binaryValue();
     assertEquals(bin.length, bytesRef.length);

@@ -595,13 +595,13 @@ public void testFilesOpenClose() throws IOException {
     // check stored fields
     for (int i = 0; i < index1.maxDoc(); i++) {
       if (liveDocs1 == null || liveDocs1.get(i)) {
-        Document doc1 = index1.document(i);
-        Document doc2 = index2.document(i);
-        List<IndexableField> field1 = doc1.getFields();
-        List<IndexableField> field2 = doc2.getFields();
+        StoredDocument doc1 = index1.document(i);
+        StoredDocument doc2 = index2.document(i);
+        List<StorableField> field1 = doc1.getFields();
+        List<StorableField> field2 = doc2.getFields();
         assertEquals("Different numbers of fields for doc " + i + ".", field1.size(), field2.size());
-        Iterator<IndexableField> itField1 = field1.iterator();
-        Iterator<IndexableField> itField2 = field2.iterator();
+        Iterator<StorableField> itField1 = field1.iterator();
+        Iterator<StorableField> itField2 = field2.iterator();
         while (itField1.hasNext()) {
           Field curField1 = (Field) itField1.next();
           Field curField2 = (Field) itField2.next();

@@ -1082,7 +1082,7 @@ public void testFilesOpenClose() throws IOException {
     Set<String> fieldsToLoad = new HashSet<String>();
     assertEquals(0, r.document(0, fieldsToLoad).getFields().size());
     fieldsToLoad.add("field1");
-    Document doc2 = r.document(0, fieldsToLoad);
+    StoredDocument doc2 = r.document(0, fieldsToLoad);
     assertEquals(1, doc2.getFields().size());
     assertEquals("foobar", doc2.get("field1"));
     r.close();
@@ -122,7 +122,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
       if (i>0) {
         int k = i-1;
         int n = j + k*M;
-        Document prevItereationDoc = reader.document(n);
+        StoredDocument prevItereationDoc = reader.document(n);
         assertNotNull(prevItereationDoc);
         String id = prevItereationDoc.get("id");
         assertEquals(k+"_"+j, id);
@@ -122,7 +122,7 @@ public class TestDocTermOrds extends LuceneTestCase {
     for(int id=0;id<NUM_DOCS;id++) {
       Document doc = new Document();

-      doc.add(new IntField("id", id, Field.Store.NO));
+      doc.add(new IntField("id", id, Field.Store.YES));

      final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {

@@ -219,7 +219,7 @@ public class TestDocTermOrds extends LuceneTestCase {
     for(int id=0;id<NUM_DOCS;id++) {
       Document doc = new Document();

-      doc.add(new IntField("id", id, Field.Store.NO));
+      doc.add(new IntField("id", id, Field.Store.YES));

      final int termCount = _TestUtil.nextInt(random(), 0, 20*RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -24,6 +24,7 @@ import org.apache.lucene.document.ByteDocValuesField;
 import org.apache.lucene.document.DerefBytesDocValuesField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DoubleDocValuesField;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FloatDocValuesField;
 import org.apache.lucene.document.IntDocValuesField;
 import org.apache.lucene.document.LongDocValuesField;

@@ -67,7 +68,7 @@ public class TestDocValuesTypeCompatibility extends LuceneTestCase {
   }

   @SuppressWarnings("fallthrough")
-  public IndexableField getRandomIntsField(Type maxType, boolean force) {
+  public Field getRandomIntsField(Type maxType, boolean force) {
     switch (maxType) {

     case VAR_INTS:

@@ -120,7 +121,7 @@ public class TestDocValuesTypeCompatibility extends LuceneTestCase {

   }
   @SuppressWarnings("fallthrough")
-  public IndexableField getRandomFloatField(Type maxType, boolean force) {
+  public Field getRandomFloatField(Type maxType, boolean force) {
     switch (maxType) {

     case FLOAT_64:

@@ -161,7 +162,7 @@ public class TestDocValuesTypeCompatibility extends LuceneTestCase {
     iwc.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     iwc.setRAMPerThreadHardLimitMB(2000);
     IndexWriter writer = new IndexWriter(dir, iwc);
-    IndexableField[] fields = new IndexableField[] {
+    Field[] fields = new Field[] {
         new DoubleDocValuesField("f", 1.0), new IntDocValuesField("f", 1),
         new ShortDocValuesField("f", (short) 1),
         new ByteDocValuesField("f", (byte) 1)};

@@ -194,7 +195,7 @@ public class TestDocValuesTypeCompatibility extends LuceneTestCase {
     IndexWriter writer = new IndexWriter(dir, iwc);
     boolean mustBeFixed = random().nextBoolean();
     int maxSize = 2 + random().nextInt(15);
-    IndexableField bytesField = getRandomBytesField(mustBeFixed, maxSize,
+    Field bytesField = getRandomBytesField(mustBeFixed, maxSize,
         true);
     addDoc(writer, bytesField);
     for (int j = 0; j < numDocs; j++) {

@@ -207,7 +208,7 @@ public class TestDocValuesTypeCompatibility extends LuceneTestCase {
     }
   }

-  public IndexableField getRandomBytesField(boolean mustBeFixed, int maxSize,
+  public Field getRandomBytesField(boolean mustBeFixed, int maxSize,
       boolean mustBeVariableIfNotFixed) {
     int size = mustBeFixed ? maxSize : random().nextInt(maxSize) + 1;
     StringBuilder s = new StringBuilder();

@@ -256,16 +257,16 @@ public class TestDocValuesTypeCompatibility extends LuceneTestCase {
     dir.close();
   }

-  private void addDoc(IndexWriter writer, IndexableField... fields)
+  private void addDoc(IndexWriter writer, Field... fields)
       throws IOException {
     Document doc = new Document();
-    for (IndexableField indexableField : fields) {
+    for (Field indexableField : fields) {
       doc.add(indexableField);
     }
     writer.addDocument(doc);
   }

-  public IndexableField getRandomIndexableDVField() {
+  public Field getRandomIndexableDVField() {
     int size = random().nextInt(100) + 1;
     StringBuilder s = new StringBuilder();
     for (int i = 0; i < size; i++) {
@@ -67,11 +67,11 @@ public class TestDocumentWriter extends LuceneTestCase {
     //After adding the document, we should be able to read it back in
     SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
     assertTrue(reader != null);
-    Document doc = reader.document(0);
+    StoredDocument doc = reader.document(0);
     assertTrue(doc != null);

     //System.out.println("Document: " + doc);
-    IndexableField [] fields = doc.getFields("textField2");
+    StorableField[] fields = doc.getFields("textField2");
     assertTrue(fields != null && fields.length == 1);
     assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
     assertTrue(fields[0].fieldType().storeTermVectors());
@ -539,25 +539,25 @@ public class TestDuelingCodecs extends LuceneTestCase {
|
||||||
public void assertStoredFields(IndexReader leftReader, IndexReader rightReader) throws Exception {
|
public void assertStoredFields(IndexReader leftReader, IndexReader rightReader) throws Exception {
|
||||||
assert leftReader.maxDoc() == rightReader.maxDoc();
|
assert leftReader.maxDoc() == rightReader.maxDoc();
|
||||||
for (int i = 0; i < leftReader.maxDoc(); i++) {
|
for (int i = 0; i < leftReader.maxDoc(); i++) {
|
||||||
Document leftDoc = leftReader.document(i);
|
StoredDocument leftDoc = leftReader.document(i);
|
||||||
Document rightDoc = rightReader.document(i);
|
StoredDocument rightDoc = rightReader.document(i);
|
||||||
|
|
||||||
// TODO: I think this is bogus because we don't document what the order should be
|
// TODO: I think this is bogus because we don't document what the order should be
|
||||||
// from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
|
// from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
|
||||||
// in whatever way it wants (e.g. maybe it packs related fields together or something)
|
// in whatever way it wants (e.g. maybe it packs related fields together or something)
|
||||||
// To fix this, we sort the fields in both documents by name, but
|
// To fix this, we sort the fields in both documents by name, but
|
||||||
// we still assume that all instances with same name are in order:
|
// we still assume that all instances with same name are in order:
|
||||||
Comparator<IndexableField> comp = new Comparator<IndexableField>() {
|
Comparator<StorableField> comp = new Comparator<StorableField>() {
|
||||||
@Override
|
@Override
|
||||||
public int compare(IndexableField arg0, IndexableField arg1) {
|
public int compare(StorableField arg0, StorableField arg1) {
|
||||||
return arg0.name().compareTo(arg1.name());
|
return arg0.name().compareTo(arg1.name());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
Collections.sort(leftDoc.getFields(), comp);
|
Collections.sort(leftDoc.getFields(), comp);
|
||||||
Collections.sort(rightDoc.getFields(), comp);
|
Collections.sort(rightDoc.getFields(), comp);
|
||||||
|
|
||||||
Iterator<IndexableField> leftIterator = leftDoc.iterator();
|
Iterator<StorableField> leftIterator = leftDoc.iterator();
|
||||||
Iterator<IndexableField> rightIterator = rightDoc.iterator();
|
Iterator<StorableField> rightIterator = rightDoc.iterator();
|
||||||
while (leftIterator.hasNext()) {
|
while (leftIterator.hasNext()) {
|
||||||
assertTrue(info, rightIterator.hasNext());
|
assertTrue(info, rightIterator.hasNext());
|
||||||
assertStoredField(leftIterator.next(), rightIterator.next());
|
assertStoredField(leftIterator.next(), rightIterator.next());
|
||||||
|
@@ -569,7 +569,7 @@ public class TestDuelingCodecs extends LuceneTestCase {
   /**
    * checks that two stored fields are equivalent
    */
-  public void assertStoredField(IndexableField leftField, IndexableField rightField) {
+  public void assertStoredField(StorableField leftField, StorableField rightField) {
     assertEquals(info, leftField.name(), rightField.name());
     assertEquals(info, leftField.binaryValue(), rightField.binaryValue());
     assertEquals(info, leftField.stringValue(), rightField.stringValue());
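Note: the two hunks above show the read-side core of LUCENE-3312: IndexReader.document(int) now hands back a StoredDocument whose fields are StorableFields. A minimal sketch of that access pattern, assuming the types and import locations used elsewhere in this diff (org.apache.lucene.index.StoredDocument and StorableField); the index contents are illustrative.

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.StorableField;
    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.store.Directory;

    class StoredFieldsSketch {
      // Prints every stored field of every document in the index.
      static void dumpStoredFields(Directory dir) throws IOException {
        IndexReader reader = DirectoryReader.open(dir);
        try {
          for (int i = 0; i < reader.maxDoc(); i++) {
            StoredDocument doc = reader.document(i);      // was: Document
            for (StorableField field : doc.getFields()) { // was: IndexableField
              // stringValue() may be null for binary-only fields
              System.out.println(field.name() + " = " + field.stringValue());
            }
          }
        } finally {
          reader.close();
        }
      }
    }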
@@ -47,7 +47,7 @@ public class TestFieldInfos extends LuceneTestCase {
     //Positive test of FieldInfos
     assertTrue(testDoc != null);
     FieldInfos.Builder builder = new FieldInfos.Builder();
-    for (IndexableField field : testDoc) {
+    for (IndexableField field : testDoc.getFields()) {
       builder.addOrUpdate(field.name(), field.fieldType());
     }
     FieldInfos fieldInfos = builder.finish();
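Note: on the write side, a Document is no longer iterated directly here; per-field metadata now comes through getFields(), as in the hunk above. A sketch of the adjusted loop, assuming only the FieldInfos.Builder calls shown in this hunk.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.IndexableField;

    class FieldInfosSketch {
      // Registers every field of a write-side Document with a FieldInfos.Builder.
      static FieldInfos infosFor(Document doc) {
        FieldInfos.Builder builder = new FieldInfos.Builder();
        for (IndexableField field : doc.getFields()) {
          builder.addOrUpdate(field.name(), field.fieldType());
        }
        return builder.finish();
      }
    }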
@@ -55,7 +55,7 @@ public class TestFieldsReader extends LuceneTestCase {
   public static void beforeClass() throws Exception {
     fieldInfos = new FieldInfos.Builder();
     DocHelper.setupDoc(testDoc);
-    for (IndexableField field : testDoc) {
+    for (IndexableField field : testDoc.getFields()) {
       fieldInfos.addOrUpdate(field.name(), field.fieldType());
     }
     dir = newDirectory();
@@ -79,7 +79,7 @@ public class TestFieldsReader extends LuceneTestCase {
     assertTrue(dir != null);
     assertTrue(fieldInfos != null);
     IndexReader reader = DirectoryReader.open(dir);
-    Document doc = reader.document(0);
+    StoredDocument doc = reader.document(0);
     assertTrue(doc != null);
     assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null);
@@ -104,7 +104,7 @@ public class TestFieldsReader extends LuceneTestCase {
 
     DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
     reader.document(0, visitor);
-    final List<IndexableField> fields = visitor.getDocument().getFields();
+    final List<StorableField> fields = visitor.getDocument().getFields();
     assertEquals(1, fields.size());
     assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());
     reader.close();
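Note: the visitor path remains the cheap way to load a subset of stored fields; only its result type changed. A sketch assuming the DocumentStoredFieldVisitor constructor and getDocument() usage in the hunk above; "title" is an illustrative field name.

    import java.io.IOException;

    import org.apache.lucene.document.DocumentStoredFieldVisitor;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.StorableField;

    class VisitorSketch {
      // Loads only the "title" field of one document instead of all stored fields.
      static String loadTitle(IndexReader reader, int docID) throws IOException {
        DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor("title");
        reader.document(docID, visitor);
        for (StorableField field : visitor.getDocument().getFields()) {
          return field.stringValue(); // at most the listed field was collected
        }
        return null;
      }
    }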
@@ -279,7 +279,7 @@ public class TestFieldsReader extends LuceneTestCase {
     doc.add(sf);
     answers[id] = answer;
     typeAnswers[id] = typeAnswer;
-    FieldType ft = new FieldType(IntField.TYPE_NOT_STORED);
+    FieldType ft = new FieldType(IntField.TYPE_STORED);
     ft.setNumericPrecisionStep(Integer.MAX_VALUE);
     doc.add(new IntField("id", id, ft));
     w.addDocument(doc);
@@ -293,7 +293,7 @@ public class TestFieldsReader extends LuceneTestCase {
     final AtomicReader sub = ctx.reader();
     final int[] ids = FieldCache.DEFAULT.getInts(sub, "id", false);
     for(int docID=0;docID<sub.numDocs();docID++) {
-      final Document doc = sub.document(docID);
+      final StoredDocument doc = sub.document(docID);
       final Field f = (Field) doc.getField("nf");
       assertTrue("got f=" + f, f instanceof StoredField);
       assertEquals(answers[ids[docID]], f.numericValue());
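Note: the two hunks above pair up: the field type has to be stored (IntField.TYPE_STORED) for the value to come back from sub.document(docID), where it materializes as a StoredField carrying a numericValue(). A read-side sketch under those assumptions; "nf" matches the test's field name.

    import java.io.IOException;

    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.StoredDocument;

    class NumericStoredSketch {
      // Reads back a stored numeric field; the concrete class here is StoredField.
      static Number readNumeric(IndexReader reader, int docID) throws IOException {
        StoredDocument doc = reader.document(docID);
        Field f = (Field) doc.getField("nf");
        assert f instanceof StoredField : "got f=" + f;
        return f.numericValue();
      }
    }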
@@ -908,8 +908,8 @@ public class TestIndexWriter extends LuceneTestCase {
     w.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    Document doc2 = ir.document(0);
-    IndexableField f2 = doc2.getField("binary");
+    StoredDocument doc2 = ir.document(0);
+    StorableField f2 = doc2.getField("binary");
     b = f2.binaryValue().bytes;
     assertTrue(b != null);
     assertEquals(17, b.length, 17);
@@ -1165,8 +1165,8 @@ public class TestIndexWriter extends LuceneTestCase {
     w.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    Document doc2 = ir.document(0);
-    IndexableField f3 = doc2.getField("binary");
+    StoredDocument doc2 = ir.document(0);
+    StorableField f3 = doc2.getField("binary");
     b = f3.binaryValue().bytes;
     assertTrue(b != null);
     assertEquals(17, b.length, 17);
@@ -1207,8 +1207,8 @@ public class TestIndexWriter extends LuceneTestCase {
     doc.add(newField("zzz", "1 2 3", customType));
     w.addDocument(doc);
     IndexReader r = w.getReader();
-    Document doc2 = r.document(0);
-    Iterator<IndexableField> it = doc2.getFields().iterator();
+    StoredDocument doc2 = r.document(0);
+    Iterator<StorableField> it = doc2.getFields().iterator();
     assertTrue(it.hasNext());
     Field f = (Field) it.next();
     assertEquals(f.name(), "zzz");
@@ -1559,50 +1559,46 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     try {
       doc = new Document();
       // try to boost with norms omitted
-      List<IndexableField> list = new ArrayList<IndexableField>();
-      list.add(new IndexableField() {
+      IndexDocument docList = new IndexDocument() {
+
+        List<IndexableField> list = new ArrayList<IndexableField>();
+        List<StorableField> storedList = new ArrayList<StorableField>();
 
         @Override
-        public String name() {
-          return "foo";
+        public Iterable<? extends IndexableField> indexableFields() {
+          if (list.size() == 0) {
+            list.add(new IndexableField() {
+              @Override
+              public String name() {
+                return "foo";
+              }
+
+              @Override
+              public IndexableFieldType fieldType() {
+                return StringField.TYPE_NOT_STORED;
+              }
+
+              @Override
+              public float boost() {
+                return 5f;
+              }
+
+              @Override
+              public TokenStream tokenStream(Analyzer analyzer) throws IOException {
+                return null;
+              }
+            });
+          }
+          return list;
         }
 
         @Override
-        public IndexableFieldType fieldType() {
-          return StringField.TYPE_NOT_STORED;
+        public Iterable<? extends StorableField> storableFields() {
+          return storedList;
         }
 
-        @Override
-        public float boost() {
-          return 5f;
-        }
-
-        @Override
-        public BytesRef binaryValue() {
-          return null;
-        }
-
-        @Override
-        public String stringValue() {
-          return "baz";
-        }
-
-        @Override
-        public Reader readerValue() {
-          return null;
-        }
-
-        @Override
-        public Number numericValue() {
-          return null;
-        }
-
-        @Override
-        public TokenStream tokenStream(Analyzer analyzer) throws IOException {
-          return null;
-        }
-      });
-      iw.addDocument(list);
+      };
+      iw.addDocument(docList);
       fail("didn't get any exception, boost silently discarded");
     } catch (UnsupportedOperationException expected) {
      // expected
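Note: IndexDocument is the abstraction IndexWriter.addDocument accepts after this change: one view of the fields to invert (indexableFields) and one of the fields to store (storableFields). A minimal concrete sketch using only the two methods this diff shows; everything else about the type is an assumption.

    import java.util.Collections;
    import java.util.List;

    import org.apache.lucene.index.IndexDocument;
    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.index.StorableField;

    // Wraps two prebuilt field lists as an IndexDocument.
    class ListBackedDocument implements IndexDocument {
      private final List<IndexableField> indexed;
      private final List<StorableField> stored;

      ListBackedDocument(List<IndexableField> indexed, List<StorableField> stored) {
        this.indexed = indexed;
        this.stored = stored;
      }

      @Override
      public Iterable<? extends IndexableField> indexableFields() {
        return Collections.unmodifiableList(indexed);
      }

      @Override
      public Iterable<? extends StorableField> storableFields() {
        return Collections.unmodifiableList(stored);
      }
    }

A call like iw.addDocument(new ListBackedDocument(indexedFields, storedFields)) then plays the role of the anonymous class in the hunk above.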
@@ -84,7 +84,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
     int max = reader.maxDoc();
     for (int i = 0; i < max; i++)
     {
-      Document temp = reader.document(i);
+      StoredDocument temp = reader.document(i);
       //System.out.println("doc "+i+"="+temp.getField("count").stringValue());
       //compare the index doc number to the value that it should be
       if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
@@ -141,7 +141,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
 
     String id10 = r1.document(10).getField("id").stringValue();
 
-    Document newDoc = r1.document(10);
+    Document newDoc = new Document(r1.document(10));
     newDoc.removeField("id");
     newDoc.add(newStringField("id", Integer.toString(8000), Field.Store.YES));
     writer.updateDocument(new Term("id", id10), newDoc);
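Note: updateDocument still wants an indexable Document, so a StoredDocument fetched from a reader is first copied back into a Document; the hunk above does this with a Document(StoredDocument) constructor. A sketch of that round trip, mirroring the test's "id" replacement; import locations are assumptions.

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    class UpdateSketch {
      // Fetch a stored document, copy it, swap its id, and re-index it.
      static void replaceId(IndexReader reader, IndexWriter writer,
                            int docID, String oldId, String newId) throws IOException {
        Document updated = new Document(reader.document(docID)); // StoredDocument -> Document copy
        updated.removeField("id");
        updated.add(new StringField("id", newId, Field.Store.YES));
        writer.updateDocument(new Term("id", oldId), updated);
      }
    }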
@@ -271,9 +271,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
     assertEquals(100, index2df);
 
     // verify the docs are from different indexes
-    Document doc5 = r1.document(5);
+    StoredDocument doc5 = r1.document(5);
     assertEquals("index1", doc5.get("indexname"));
-    Document doc150 = r1.document(150);
+    StoredDocument doc150 = r1.document(150);
     assertEquals("index2", doc150.get("indexname"));
     r1.close();
     writer.close();
@@ -259,7 +259,7 @@ public class TestIndexWriterUnicode extends LuceneTestCase {
     w.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    Document doc2 = ir.document(0);
+    StoredDocument doc2 = ir.document(0);
     for(int i=0;i<count;i++) {
       assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
       assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
@@ -24,8 +24,8 @@ import java.util.Iterator;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -39,7 +39,7 @@ import org.apache.lucene.util._TestUtil;
 
 public class TestIndexableField extends LuceneTestCase {
 
-  private class MyField implements IndexableField {
+  private class MyField implements IndexableField, StorableField {
 
     private final int counter;
     private final IndexableFieldType fieldType = new IndexableFieldType() {
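Note: MyField now implements both IndexableField and StorableField, so one object can feed the inverted index and the stored fields alike. A compressed sketch of that dual shape; the combined method set is inferred from the members this commit shuffles between the two interfaces, so treat the exact signatures as assumptions.

    import java.io.IOException;
    import java.io.Reader;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.index.IndexableFieldType;
    import org.apache.lucene.index.StorableField;
    import org.apache.lucene.util.BytesRef;

    // One field object usable on both the indexing and the storage path.
    class DualField implements IndexableField, StorableField {
      @Override public String name() { return "foo"; }
      @Override public IndexableFieldType fieldType() { return StringField.TYPE_STORED; }
      @Override public float boost() { return 1f; }
      @Override public BytesRef binaryValue() { return null; }
      @Override public String stringValue() { return "bar"; }
      @Override public Reader readerValue() { return null; }
      @Override public Number numericValue() { return null; }
      @Override public TokenStream tokenStream(Analyzer analyzer) throws IOException {
        return analyzer.tokenStream(name(), new StringReader(stringValue()));
      }
    }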
@@ -89,7 +89,7 @@ public class TestIndexableField extends LuceneTestCase {
     }
 
     @Override
-    public DocValues.Type docValueType() {
+    public Type docValueType() {
       return null;
     }
   };
@@ -183,35 +183,100 @@ public class TestIndexableField extends LuceneTestCase {
       final int finalBaseCount = baseCount;
       baseCount += fieldCount-1;
 
-      w.addDocument(new Iterable<IndexableField>() {
-        @Override
-        public Iterator<IndexableField> iterator() {
-          return new Iterator<IndexableField>() {
-            int fieldUpto;
-
-            @Override
-            public boolean hasNext() {
-              return fieldUpto < fieldCount;
-            }
-
-            @Override
-            public IndexableField next() {
-              assert fieldUpto < fieldCount;
-              if (fieldUpto == 0) {
-                fieldUpto = 1;
-                return newStringField("id", ""+finalDocCount, Field.Store.YES);
-              } else {
-                return new MyField(finalBaseCount + (fieldUpto++-1));
-              }
-            }
-
-            @Override
-            public void remove() {
-              throw new UnsupportedOperationException();
-            }
-          };
-        }
-      });
+      IndexDocument d = new IndexDocument() {
+        @Override
+        public Iterable<? extends IndexableField> indexableFields() {
+          return new Iterable<IndexableField>() {
+            @Override
+            public Iterator<IndexableField> iterator() {
+              return new Iterator<IndexableField>() {
+                int fieldUpto = 0;
+                private IndexableField next;
+
+                @Override
+                public boolean hasNext() {
+                  if (fieldUpto >= fieldCount) return false;
+
+                  next = null;
+                  if (fieldUpto == 0) {
+                    fieldUpto = 1;
+                    next = newStringField("id", ""+finalDocCount, Field.Store.YES);
+                  } else {
+                    next = new MyField(finalBaseCount + (fieldUpto++-1));
+                  }
+
+                  if (next != null && next.fieldType().indexed()) return true;
+                  else return this.hasNext();
+                }
+
+                @Override
+                public IndexableField next() {
+                  assert fieldUpto <= fieldCount;
+                  if (next == null && !hasNext()) {
+                    return null;
+                  }
+                  else {
+                    return next;
+                  }
+                }
+
+                @Override
+                public void remove() {
+                  throw new UnsupportedOperationException();
+                }
+              };
+            }
+          };
+        }
+
+        @Override
+        public Iterable<? extends StorableField> storableFields() {
+          return new Iterable<StorableField>() {
+            @Override
+            public Iterator<StorableField> iterator() {
+              return new Iterator<StorableField>() {
+                int fieldUpto = 0;
+                private StorableField next = null;
+
+                @Override
+                public boolean hasNext() {
+
+                  if (fieldUpto == fieldCount) return false;
+
+                  next = null;
+                  if (fieldUpto == 0) {
+                    fieldUpto = 1;
+                    next = newStringField("id", ""+finalDocCount, Field.Store.YES);
+                  } else {
+                    next = new MyField(finalBaseCount + (fieldUpto++-1));
+                  }
+
+                  if (next != null && next.fieldType().stored()) return true;
+                  else return this.hasNext();
+                }
+
+                @Override
+                public StorableField next() {
+                  assert fieldUpto <= fieldCount;
+                  if (next == null && !hasNext()) {
+                    return null;
+                  }
+                  else {
+                    return next;
+                  }
+                }
+
+                @Override
+                public void remove() {
+                  throw new UnsupportedOperationException();
+                }
+              };
+            }
+          };
+        }
+      };
+
+      w.addDocument(d);
     }
 
     final IndexReader r = w.getReader();
@@ -223,10 +288,11 @@ public class TestIndexableField extends LuceneTestCase {
       if (VERBOSE) {
         System.out.println("TEST: verify doc id=" + id + " (" + fieldsPerDoc[id] + " fields) counter=" + counter);
       }
 
       final TopDocs hits = s.search(new TermQuery(new Term("id", ""+id)), 1);
       assertEquals(1, hits.totalHits);
       final int docID = hits.scoreDocs[0].doc;
-      final Document doc = s.doc(docID);
+      final StoredDocument doc = s.doc(docID);
       final int endCounter = counter + fieldsPerDoc[id];
       while(counter < endCounter) {
         final String name = "f" + counter;
@@ -245,7 +311,7 @@ public class TestIndexableField extends LuceneTestCase {
 
       // stored:
       if (stored) {
-        IndexableField f = doc.getField(name);
+        StorableField f = doc.getField(name);
         assertNotNull("doc " + id + " doesn't have field f" + counter, f);
         if (binary) {
           assertNotNull("doc " + id + " doesn't have field f" + counter, f);
@@ -105,7 +105,7 @@ public class TestNorms extends LuceneTestCase {
     assertEquals(Type.FIXED_INTS_8, normValues.getType());
     byte[] norms = (byte[]) source.getArray();
     for (int i = 0; i < open.maxDoc(); i++) {
-      Document document = open.document(i);
+      StoredDocument document = open.document(i);
       int expected = Integer.parseInt(document.get(byteTestField));
       assertEquals((byte)expected, norms[i]);
     }
@@ -164,7 +164,7 @@ public class TestNorms extends LuceneTestCase {
     assertEquals(Type.FIXED_INTS_8, normValues.getType());
     byte[] norms = (byte[]) source.getArray();
     for (int i = 0; i < mergedReader.maxDoc(); i++) {
-      Document document = mergedReader.document(i);
+      StoredDocument document = mergedReader.document(i);
       int expected = Integer.parseInt(document.get(byteTestField));
       assertEquals((byte) expected, norms[i]);
     }
@@ -224,8 +224,8 @@ public class TestParallelAtomicReader extends LuceneTestCase {
     assertEquals(parallelHits.length, singleHits.length);
     for(int i = 0; i < parallelHits.length; i++) {
       assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
-      Document docParallel = parallel.doc(parallelHits[i].doc);
-      Document docSingle = single.doc(singleHits[i].doc);
+      StoredDocument docParallel = parallel.doc(parallelHits[i].doc);
+      StoredDocument docSingle = single.doc(singleHits[i].doc);
       assertEquals(docParallel.get("f1"), docSingle.get("f1"));
       assertEquals(docParallel.get("f2"), docSingle.get("f2"));
       assertEquals(docParallel.get("f3"), docSingle.get("f3"));
@@ -283,8 +283,8 @@ public class TestParallelCompositeReader extends LuceneTestCase {
     assertEquals(parallelHits.length, singleHits.length);
     for(int i = 0; i < parallelHits.length; i++) {
       assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
-      Document docParallel = parallel.doc(parallelHits[i].doc);
-      Document docSingle = single.doc(singleHits[i].doc);
+      StoredDocument docParallel = parallel.doc(parallelHits[i].doc);
+      StoredDocument docSingle = single.doc(singleHits[i].doc);
       assertEquals(docParallel.get("f1"), docSingle.get("f1"));
       assertEquals(docParallel.get("f2"), docSingle.get("f2"));
       assertEquals(docParallel.get("f3"), docSingle.get("f3"));
@@ -240,7 +240,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
 
     for(int docCount=0;docCount<numDocs;docCount++) {
       Document doc = new Document();
-      doc.add(new IntField("id", docCount, Field.Store.NO));
+      doc.add(new IntField("id", docCount, Field.Store.YES));
       List<Token> tokens = new ArrayList<Token>();
       final int numTokens = atLeast(100);
       //final int numTokens = atLeast(20);
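Note: the Store.NO to Store.YES flips in this commit are not cosmetic: with indexed and stored fields now distinct, a value is only visible through reader.document(...) if it was stored. A small sketch of the distinction, using "id" as in the tests.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.IntField;

    class StoreFlagSketch {
      static Document docWithRetrievableId(int id) {
        Document doc = new Document();
        // Store.YES: reader.document(docID).get("id") can recover the value.
        // With Store.NO the field is searchable but absent from stored fields.
        doc.add(new IntField("id", id, Field.Store.YES));
        return doc;
      }
    }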
@@ -121,7 +121,7 @@ public class TestRandomStoredFields extends LuceneTestCase {
     }
     TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
     assertEquals(1, hits.totalHits);
-    Document doc = r.document(hits.scoreDocs[0].doc);
+    StoredDocument doc = r.document(hits.scoreDocs[0].doc);
     Document docExp = docs.get(testID);
     for(int i=0;i<fieldCount;i++) {
       assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
@@ -69,7 +69,7 @@ public class TestRollingUpdates extends LuceneTestCase {
     if (VERBOSE) {
       System.out.println("  docIter=" + docIter + " id=" + id);
     }
-    ((Field) doc.getField("docid")).setStringValue(myID);
+    doc.getField("docid").setStringValue(myID);
 
     Term idTerm = new Term("docid", myID);
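Note: on the write-side Document, getField now returns a concrete Field, so the old ((Field) ...) cast disappears. A sketch of the reuse pattern the test relies on, assuming that accessor.

    import org.apache.lucene.document.Document;

    class ReuseDocSketch {
      // Mutates a reused Document's id field in place between addDocument calls.
      static void setDocId(Document doc, String myID) {
        doc.getField("docid").setStringValue(myID); // no (Field) cast needed any more
      }
    }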
@@ -96,11 +96,11 @@ public class TestSegmentMerger extends LuceneTestCase {
                                                 DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
     assertTrue(mergedReader != null);
     assertTrue(mergedReader.numDocs() == 2);
-    Document newDoc1 = mergedReader.document(0);
+    StoredDocument newDoc1 = mergedReader.document(0);
     assertTrue(newDoc1 != null);
     //There are 2 unstored fields on the document
     assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
-    Document newDoc2 = mergedReader.document(1);
+    StoredDocument newDoc2 = mergedReader.document(1);
     assertTrue(newDoc2 != null);
     assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
@@ -62,13 +62,13 @@ public class TestSegmentReader extends LuceneTestCase {
   public void testDocument() throws IOException {
     assertTrue(reader.numDocs() == 1);
     assertTrue(reader.maxDoc() >= 1);
-    Document result = reader.document(0);
+    StoredDocument result = reader.document(0);
     assertTrue(result != null);
     //There are 2 unstored fields on the document that are not preserved across writing
     assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
 
-    List<IndexableField> fields = result.getFields();
-    for (final IndexableField field : fields ) {
+    List<StorableField> fields = result.getFields();
+    for (final StorableField field : fields ) {
       assertTrue(field != null);
       assertTrue(DocHelper.nameValues.containsKey(field.name()));
     }
@@ -133,8 +133,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
 
   static Term idTerm = new Term("id","");
   IndexingThread[] threads;
-  static Comparator<IndexableField> fieldNameComparator = new Comparator<IndexableField>() {
-    public int compare(IndexableField o1, IndexableField o2) {
+  static Comparator<GeneralField> fieldNameComparator = new Comparator<GeneralField>() {
+    public int compare(GeneralField o1, GeneralField o2) {
       return o1.name().compareTo(o2.name());
     }
   };
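Note: GeneralField appears here as the shared supertype of IndexableField and StorableField (both expose name()), which lets a single comparator sort write-side Field lists and read-side StorableField lists alike. A sketch under that assumption, including the assumed import location.

    import java.util.Comparator;

    import org.apache.lucene.index.GeneralField;

    class FieldOrder {
      // Orders any mix of indexable and storable fields by name.
      static final Comparator<GeneralField> BY_NAME = new Comparator<GeneralField>() {
        @Override
        public int compare(GeneralField o1, GeneralField o2) {
          return o1.name().compareTo(o2.name());
        }
      };
    }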
@@ -250,7 +250,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
     Iterator<Document> iter = docs.values().iterator();
     while (iter.hasNext()) {
       Document d = iter.next();
-      ArrayList<IndexableField> fields = new ArrayList<IndexableField>();
+      ArrayList<Field> fields = new ArrayList<Field>();
       fields.addAll(d.getFields());
       // put fields in same order each time
       Collections.sort(fields, fieldNameComparator);
@@ -287,7 +287,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
       Bits liveDocs = sub.getLiveDocs();
       System.out.println("  " + ((SegmentReader) sub).getSegmentInfo());
       for(int docID=0;docID<sub.maxDoc();docID++) {
-        Document doc = sub.document(docID);
+        StoredDocument doc = sub.document(docID);
         if (liveDocs == null || liveDocs.get(docID)) {
           System.out.println("    docID=" + docID + " id:" + doc.get("id"));
         } else {
@@ -573,9 +573,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
       }
     }
 
-  public static void verifyEquals(Document d1, Document d2) {
-    List<IndexableField> ff1 = d1.getFields();
-    List<IndexableField> ff2 = d2.getFields();
+  public static void verifyEquals(StoredDocument d1, StoredDocument d2) {
+    List<StorableField> ff1 = d1.getFields();
+    List<StorableField> ff2 = d2.getFields();
 
     Collections.sort(ff1, fieldNameComparator);
     Collections.sort(ff2, fieldNameComparator);
@@ -583,8 +583,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
     assertEquals(ff1 + " : " + ff2, ff1.size(), ff2.size());
 
     for (int i=0; i<ff1.size(); i++) {
-      IndexableField f1 = ff1.get(i);
-      IndexableField f2 = ff2.get(i);
+      StorableField f1 = ff1.get(i);
+      StorableField f2 = ff2.get(i);
       if (f1.binaryValue() != null) {
         assert(f2.binaryValue() != null);
       } else {
@@ -343,12 +343,12 @@ public class TestStressNRT extends LuceneTestCase {
             if (results.totalHits != 1) {
               System.out.println("FAIL: hits id:" + id + " val=" + val);
               for(ScoreDoc sd : results.scoreDocs) {
-                final Document doc = r.document(sd.doc);
+                final StoredDocument doc = r.document(sd.doc);
                 System.out.println("  docID=" + sd.doc + " id:" + doc.get("id") + " foundVal=" + doc.get(field));
               }
               fail("id=" + id + " reader=" + r + " totalHits=" + results.totalHits);
             }
-            Document doc = searcher.doc(results.scoreDocs[0].doc);
+            StoredDocument doc = searcher.doc(results.scoreDocs[0].doc);
             long foundVal = Long.parseLong(doc.get(field));
             if (foundVal < Math.abs(val)) {
               fail("foundVal=" + foundVal + " val=" + val + " id=" + id + " reader=" + r);
@@ -156,7 +156,7 @@ public class TestTermsEnum extends LuceneTestCase {
 
   private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
     Document doc = new Document();
-    doc.add(new IntField("id", id, Field.Store.NO));
+    doc.add(new IntField("id", id, Field.Store.YES));
     if (VERBOSE) {
       System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
     }
@@ -22,6 +22,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
@@ -435,7 +436,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
     DecimalFormat f = new DecimalFormat("0.000000", DecimalFormatSymbols.getInstance(Locale.ROOT));
 
     for (int i = 0; i < h.length; i++) {
-      Document d = searcher.doc(h[i].doc);
+      StoredDocument d = searcher.doc(h[i].doc);
       float score = h[i].score;
       System.err.println("#" + i + ": " + f.format(score) + " - " +
                          d.get("id") + " - " + d.get("data"));
@@ -27,6 +27,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.store.Directory;
 
 /**
@@ -82,7 +83,7 @@ public class TestDateSort extends LuceneTestCase {
     String[] actualOrder = new String[5];
     ScoreDoc[] hits = searcher.search(query, null, 1000, sort).scoreDocs;
     for (int i = 0; i < hits.length; i++) {
-      Document document = searcher.doc(hits[i].doc);
+      StoredDocument document = searcher.doc(hits[i].doc);
       String text = document.get(TEXT_FIELD);
       actualOrder[i] = text;
     }
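Note: taken together, the search-side hunks are mechanical: only the returned document type changes, while get(...) keeps working. A condensed sketch of the migrated pattern, assuming the searcher.search(Query, Filter, int, Sort) overload used above; field names are illustrative.

    import java.io.IOException;

    import org.apache.lucene.index.StoredDocument;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.Sort;

    class SearchSideSketch {
      // Collects one stored field per hit; only the document type changed.
      static String[] collectField(IndexSearcher searcher, Query query, Sort sort,
                                   String fieldName) throws IOException {
        ScoreDoc[] hits = searcher.search(query, null, 1000, sort).scoreDocs;
        String[] values = new String[hits.length];
        for (int i = 0; i < hits.length; i++) {
          StoredDocument document = searcher.doc(hits[i].doc); // was: Document
          values[i] = document.get(fieldName);
        }
        return values;
      }
    }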
Some files were not shown because too many files have changed in this diff.