lucene 4: Upgraded UidFieldTests class.
parent f796fe8d5e
commit aa2a8c66cc
@@ -19,6 +19,7 @@
 package org.elasticsearch.common.lucene.uid;
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 
@@ -55,11 +56,12 @@ public class UidField extends Field {
 
     // this works fine for nested docs since they don't have the payload which has the version
     // so we iterate till we find the one with the payload
+    // LUCENE 4 UPGRADE: We can get rid of the do while loop, since there is only one _uid value (live docs are taken into account)
     public static DocIdAndVersion loadDocIdAndVersion(AtomicReaderContext context, Term term) {
         int docId = Lucene.NO_DOC;
         try {
             DocsAndPositionsEnum uid = context.reader().termPositionsEnum(term);
-            if (uid.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
+            if (uid == null || uid.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
                 return null; // no doc
             }
             // Note, only master docs uid have version payload, so we can use that info to not
@@ -87,10 +89,11 @@ public class UidField extends Field {
      * Load the version for the uid from the reader, returning -1 if no doc exists, or -2 if
      * no version is available (for backward comp.)
      */
+    // LUCENE 4 UPGRADE: We can get rid of the do while loop, since there is only one _uid value (live docs are taken into account)
     public static long loadVersion(AtomicReaderContext context, Term term) {
         try {
             DocsAndPositionsEnum uid = context.reader().termPositionsEnum(term);
-            if (uid.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
+            if (uid == null || uid.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
                 return -1;
             }
             // Note, only master docs uid have version payload, so we can use that info to not
@@ -117,10 +120,8 @@ public class UidField extends Field {
     private long version;
 
     private final UidPayloadTokenStream tokenStream;
 
     public UidField(String name, String uid, long version) {
-        super(name, uid, UidFieldMapper.Defaults.UID_FIELD_TYPE);
+        super(name, UidFieldMapper.Defaults.UID_FIELD_TYPE);
         this.uid = uid;
         this.version = version;
         this.tokenStream = new UidPayloadTokenStream(this);
@@ -153,7 +154,7 @@ public class UidField extends Field {
     }
 
     @Override
-    public TokenStream tokenStreamValue() {
+    public TokenStream tokenStream(Analyzer analyzer) throws IOException {
         return tokenStream;
     }
 
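Note on the guard added in both lookup methods above: in Lucene 4, AtomicReader.termPositionsEnum(Term) returns null when the term has no postings in that segment, so the enum has to be null-checked before nextDoc() is called (and, as the added comment says, live docs are already taken into account). A minimal standalone sketch of the pattern, not part of the commit; the reader and term variables are assumed to be in scope:

    // Assumed: AtomicReader reader and Term term are in scope
    DocsAndPositionsEnum positions = reader.termPositionsEnum(term);
    if (positions == null || positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
        // term absent or no live docs: mirrors the "return null" / "return -1" paths above
    } else {
        // a doc matched; when present, the _uid version is carried in the term's payload
    }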
@@ -21,13 +21,11 @@ package org.elasticsearch.test.unit.common.lucene.uid;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.index.*;
 import org.apache.lucene.store.RAMDirectory;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.uid.UidField;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
 import org.hamcrest.MatcherAssert;
 import org.testng.annotations.Test;
 
@@ -44,43 +42,49 @@ public class UidFieldTests {
     public void testUidField() throws Exception {
         IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
 
-        IndexReader reader = IndexReader.open(writer, true);
-        MatcherAssert.assertThat(UidField.loadVersion(reader, new Term("_uid", "1")), equalTo(-1l));
+        DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+        AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
+        MatcherAssert.assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-1l));
 
         Document doc = new Document();
-        doc.add(new Field("_uid", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(new Field("_uid", "1", UidFieldMapper.Defaults.UID_FIELD_TYPE));
         writer.addDocument(doc);
-        reader = reader.reopen();
-        assertThat(UidField.loadVersion(reader, new Term("_uid", "1")), equalTo(-2l));
-        assertThat(UidField.loadDocIdAndVersion(reader, 0, new Term("_uid", "1")).version, equalTo(-2l));
+        directoryReader = DirectoryReader.openIfChanged(directoryReader);
+        atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
+        assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-2l));
+        assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version, equalTo(-2l));
 
         doc = new Document();
         doc.add(new UidField("_uid", "1", 1));
         writer.updateDocument(new Term("_uid", "1"), doc);
-        reader = reader.reopen();
-        assertThat(UidField.loadVersion(reader, new Term("_uid", "1")), equalTo(1l));
-        assertThat(UidField.loadDocIdAndVersion(reader, 0, new Term("_uid", "1")).version, equalTo(1l));
+        directoryReader = DirectoryReader.openIfChanged(directoryReader);
+        atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
+        assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(1l));
+        assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version, equalTo(1l));
 
         doc = new Document();
         UidField uid = new UidField("_uid", "1", 2);
         doc.add(uid);
         writer.updateDocument(new Term("_uid", "1"), doc);
-        reader = reader.reopen();
-        assertThat(UidField.loadVersion(reader, new Term("_uid", "1")), equalTo(2l));
-        assertThat(UidField.loadDocIdAndVersion(reader, 0, new Term("_uid", "1")).version, equalTo(2l));
+        directoryReader = DirectoryReader.openIfChanged(directoryReader);
+        atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
+        assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(2l));
+        assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version, equalTo(2l));
 
         // test reuse of uid field
         doc = new Document();
         uid.version(3);
         doc.add(uid);
         writer.updateDocument(new Term("_uid", "1"), doc);
-        reader = reader.reopen();
-        assertThat(UidField.loadVersion(reader, new Term("_uid", "1")), equalTo(3l));
-        assertThat(UidField.loadDocIdAndVersion(reader, 0, new Term("_uid", "1")).version, equalTo(3l));
+        directoryReader = DirectoryReader.openIfChanged(directoryReader);
+        atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
+        assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(3l));
+        assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version, equalTo(3l));
 
         writer.deleteDocuments(new Term("_uid", "1"));
-        reader = reader.reopen();
-        assertThat(UidField.loadVersion(reader, new Term("_uid", "1")), equalTo(-1l));
-        assertThat(UidField.loadDocIdAndVersion(reader, 0, new Term("_uid", "1")), nullValue());
+        directoryReader = DirectoryReader.openIfChanged(directoryReader);
+        atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
+        assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-1l));
+        assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")), nullValue());
     }
 }
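Note on the reader handling in the upgraded test: IndexReader.reopen() no longer exists in Lucene 4, so the test reopens via DirectoryReader.openIfChanged(...) and flattens the multi-segment reader with SlowCompositeReaderWrapper.wrap(...) to obtain the single AtomicReaderContext that UidField now expects. A minimal standalone sketch of that idiom, not part of the commit; writer, reopened and context are illustrative names, and openIfChanged(...) returns null when nothing changed, a case the test can ignore because every reopen follows a write:

    // Assumed: an IndexWriter named writer; all types are from org.apache.lucene.index
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    // ... add, update or delete documents through the writer ...
    DirectoryReader reopened = DirectoryReader.openIfChanged(directoryReader);
    if (reopened != null) {
        directoryReader = reopened; // null means the index did not change since the last open
    }
    // collapse all segments into one AtomicReader so a single AtomicReaderContext is available
    AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    AtomicReaderContext context = atomicReader.getContext();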