Remove old backward compatibility layer for version lookups. #18215
The current code tries to handle the case where document versions are either missing or stored in payloads rather than in doc values. Since none of the 2.x releases allowed this, we can remove this logic.
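For context, the only layout that remains supported stores the version as a numeric doc value alongside the `_uid`. A minimal sketch (mirroring the test code further down in this diff; `writer` is an IndexWriter whose setup is omitted):

    Document doc = new Document();
    // _uid identifies the document; _version lives in doc values, never in payloads
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1L));
    writer.addDocument(doc);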
This commit is contained in:
parent 5d8f684319
commit 68e7ac4166
@@ -29,7 +29,6 @@ import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.lucene.uid.Versions.DocIdAndVersion;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
 import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
@@ -52,12 +51,8 @@ final class PerThreadIDAndVersionLookup {
     private final TermsEnum termsEnum;
     /** _version data */
     private final NumericDocValues versions;
-    /** Only true when versions are indexed as payloads instead of docvalues */
-    private final boolean hasPayloads;
     /** Reused for iteration (when the term exists) */
     private PostingsEnum docsEnum;
-    /** Only used for back compat, to lookup a version from payload */
-    private PostingsEnum posEnum;

     /**
      * Initialize lookup for the provided segment
@@ -65,63 +60,37 @@ final class PerThreadIDAndVersionLookup {
     public PerThreadIDAndVersionLookup(LeafReader reader) throws IOException {
         TermsEnum termsEnum = null;
         NumericDocValues versions = null;
-        boolean hasPayloads = false;

         Fields fields = reader.fields();
         if (fields != null) {
             Terms terms = fields.terms(UidFieldMapper.NAME);
             if (terms != null) {
-                hasPayloads = terms.hasPayloads();
                 termsEnum = terms.iterator();
                 assert termsEnum != null;
                 versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
+                assert versions != null;
             }
         }

         this.versions = versions;
         this.termsEnum = termsEnum;
-        this.hasPayloads = hasPayloads;
     }

     /** Return null if id is not found. */
     public DocIdAndVersion lookup(BytesRef id, Bits liveDocs, LeafReaderContext context) throws IOException {
         if (termsEnum.seekExact(id)) {
-            if (versions != null || hasPayloads == false) {
-                // Use NDV to retrieve the version, in which case we only need PostingsEnum:
-
-                // there may be more than one matching docID, in the case of nested docs, so we want the last one:
-                docsEnum = termsEnum.postings(docsEnum, 0);
-                int docID = DocIdSetIterator.NO_MORE_DOCS;
-                for (int d = docsEnum.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) {
-                    if (liveDocs != null && liveDocs.get(d) == false) {
-                        continue;
-                    }
-                    docID = d;
-                }
-
-                if (docID != DocIdSetIterator.NO_MORE_DOCS) {
-                    if (versions != null) {
-                        return new DocIdAndVersion(docID, versions.get(docID), context);
-                    } else {
-                        // _uid found, but no doc values and no payloads
-                        return new DocIdAndVersion(docID, Versions.NOT_SET, context);
-                    }
-                }
-            }
-
-            // ... but used to be stored as payloads; in this case we must use PostingsEnum
-            posEnum = termsEnum.postings(posEnum, PostingsEnum.PAYLOADS);
-            assert posEnum != null; // terms has payloads
-            for (int d = posEnum.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = posEnum.nextDoc()) {
+            // there may be more than one matching docID, in the case of nested docs, so we want the last one:
+            docsEnum = termsEnum.postings(docsEnum, 0);
+            int docID = DocIdSetIterator.NO_MORE_DOCS;
+            for (int d = docsEnum.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) {
                 if (liveDocs != null && liveDocs.get(d) == false) {
                     continue;
                 }
-                posEnum.nextPosition();
-                final BytesRef payload = posEnum.getPayload();
-                if (payload != null && payload.length == 8) {
-                    // TODO: does this break the nested docs case? we are not returning the last matching docID here?
-                    return new DocIdAndVersion(d, Numbers.bytesToLong(payload), context);
-                }
+                docID = d;
+            }
+
+            if (docID != DocIdSetIterator.NO_MORE_DOCS) {
+                return new DocIdAndVersion(docID, versions.get(docID), context);
             }
         }
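The surviving lookup has a single code path: seek the `_uid` term, walk its postings keeping the last live doc (with nested documents the parent is flushed last), and read that doc's `_version` doc value. A usage sketch from the caller's side (API as declared in Versions.java below; reader setup omitted):

    DocIdAndVersion dv = Versions.loadDocIdAndVersion(reader, new Term(UidFieldMapper.NAME, "1"));
    if (dv == null) {
        // no live document with this _uid
    } else {
        long version = dv.version; // read from the _version doc values, now guaranteed to exist
    }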
@@ -41,11 +41,7 @@ public class Versions {
     /** indicates that the current document was not found in lucene and in the version map */
     public static final long NOT_FOUND = -1L;

-    /**
-     * used when the document is old and doesn't contain any version information in the index
-     * see {@link PerThreadIDAndVersionLookup#lookup}
-     */
-    public static final long NOT_SET = -2L;
+    // -2 was used for docs that can be found in the index but do not have a version

     /**
      * used to indicate that the write operation should be executed if the document is currently deleted
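With NOT_SET gone, loadVersion has only two outcomes left. A sketch of the remaining contract (per the javadoc updated below):

    long version = Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "1"));
    if (version == Versions.NOT_FOUND) {
        // the uid matches no live document
    } else {
        // version is the document's _version doc value
    }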
@@ -114,7 +110,7 @@ public class Versions {
     /**
      * Load the internal doc ID and version for the uid from the reader, returning<ul>
      * <li>null if the uid wasn't found,
-     * <li>a doc ID and a version otherwise, the version being potentially set to {@link #NOT_SET} if the uid has no associated version
+     * <li>a doc ID and a version otherwise
      * </ul>
      */
     public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) throws IOException {
@@ -140,7 +136,6 @@ public class Versions {
     /**
      * Load the version for the uid from the reader, returning<ul>
      * <li>{@link #NOT_FOUND} if no matching doc exists,
-     * <li>{@link #NOT_SET} if no version is available,
      * <li>the version associated with the provided uid otherwise
      * </ul>
      */
@@ -54,9 +54,6 @@ public enum VersionType implements Writeable {
         }

         private boolean isVersionConflict(long currentVersion, long expectedVersion, boolean deleted) {
-            if (currentVersion == Versions.NOT_SET) {
-                return false;
-            }
             if (expectedVersion == Versions.MATCH_ANY) {
                 return false;
             }
@@ -71,7 +68,7 @@ public enum VersionType implements Writeable {

         @Override
         public long updateVersion(long currentVersion, long expectedVersion) {
-            return (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+            return currentVersion == Versions.NOT_FOUND ? 1 : currentVersion + 1;
         }

         @Override
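The expected values for the simplified INTERNAL semantics, as exercised by testUpdateVersion at the bottom of this diff (a sketch, not new assertions):

    VersionType.INTERNAL.updateVersion(Versions.NOT_FOUND, 10); // -> 1: first version of a new document
    VersionType.INTERNAL.updateVersion(1, 1);                   // -> 2: increment the current version
    VersionType.INTERNAL.updateVersion(2, Versions.MATCH_ANY);  // -> 3: the expected version is ignored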
@@ -95,9 +92,6 @@ public enum VersionType implements Writeable {
     EXTERNAL((byte) 1) {
         @Override
         public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
-            if (currentVersion == Versions.NOT_SET) {
-                return false;
-            }
             if (currentVersion == Versions.NOT_FOUND) {
                 return false;
             }
@@ -117,9 +111,6 @@ public enum VersionType implements Writeable {

         @Override
         public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
-            if (currentVersion == Versions.NOT_SET) {
-                return false;
-            }
             if (expectedVersion == Versions.MATCH_ANY) {
                 return false;
             }
@@ -156,9 +147,6 @@ public enum VersionType implements Writeable {
     EXTERNAL_GTE((byte) 2) {
         @Override
         public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
-            if (currentVersion == Versions.NOT_SET) {
-                return false;
-            }
             if (currentVersion == Versions.NOT_FOUND) {
                 return false;
             }
@@ -178,9 +166,6 @@ public enum VersionType implements Writeable {

         @Override
         public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
-            if (currentVersion == Versions.NOT_SET) {
-                return false;
-            }
             if (expectedVersion == Versions.MATCH_ANY) {
                 return false;
             }
@@ -220,9 +205,6 @@ public enum VersionType implements Writeable {
     FORCE((byte) 3) {
         @Override
         public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
-            if (currentVersion == Versions.NOT_SET) {
-                return false;
-            }
             if (currentVersion == Versions.NOT_FOUND) {
                 return false;
             }
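The same pattern repeats across EXTERNAL, EXTERNAL_GTE and FORCE: the NOT_SET early return disappears and NOT_FOUND stays the only sentinel that short-circuits the conflict check. For example (values taken from the tests below):

    VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10, false); // false: new doc, no conflict
    VersionType.EXTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY, false); // true: external writes must carry a version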
@@ -19,21 +19,17 @@

 package org.elasticsearch.index.shard;

-import org.apache.lucene.index.CodecReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergeTrigger;
 import org.apache.lucene.index.SegmentCommitInfo;
 import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.store.Directory;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;

 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;

 /**
@@ -67,59 +63,10 @@ public final class ElasticsearchMergePolicy extends MergePolicy {
         this.delegate = delegate;
     }

-    /** Return an "upgraded" view of the reader. */
-    static CodecReader filter(CodecReader reader) throws IOException {
-        // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid?
-        // the previous code never did this, so some indexes carry around trash.
-        return reader;
-    }
-
-    static class IndexUpgraderOneMerge extends OneMerge {
-
-        public IndexUpgraderOneMerge(List<SegmentCommitInfo> segments) {
-            super(segments);
-        }
-
-        @Override
-        public List<CodecReader> getMergeReaders() throws IOException {
-            final List<CodecReader> newReaders = new ArrayList<>();
-            for (CodecReader reader : super.getMergeReaders()) {
-                newReaders.add(filter(reader));
-            }
-            return newReaders;
-        }
-
-    }
-
-    static class IndexUpgraderMergeSpecification extends MergeSpecification {
-
-        @Override
-        public void add(OneMerge merge) {
-            super.add(new IndexUpgraderOneMerge(merge.segments));
-        }
-
-        @Override
-        public String segString(Directory dir) {
-            return "IndexUpgraderMergeSpec[" + super.segString(dir) + "]";
-        }
-
-    }
-
-    static MergeSpecification upgradedMergeSpecification(MergeSpecification spec) {
-        if (spec == null) {
-            return null;
-        }
-        MergeSpecification upgradedSpec = new IndexUpgraderMergeSpecification();
-        for (OneMerge merge : spec.merges) {
-            upgradedSpec.add(merge);
-        }
-        return upgradedSpec;
-    }
-
     @Override
     public MergeSpecification findMerges(MergeTrigger mergeTrigger,
         SegmentInfos segmentInfos, IndexWriter writer) throws IOException {
-        return upgradedMergeSpecification(delegate.findMerges(mergeTrigger, segmentInfos, writer));
+        return delegate.findMerges(mergeTrigger, segmentInfos, writer);
     }

     private boolean shouldUpgrade(SegmentCommitInfo info) {
@@ -148,7 +95,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy {
         throws IOException {

         if (upgradeInProgress) {
-            MergeSpecification spec = new IndexUpgraderMergeSpecification();
+            MergeSpecification spec = new MergeSpecification();
             for (SegmentCommitInfo info : segmentInfos) {

                 if (shouldUpgrade(info)) {
@@ -183,13 +130,13 @@ public final class ElasticsearchMergePolicy extends MergePolicy {
             // has a chance to decide what to do (e.g. collapse the segments to satisfy maxSegmentCount)
         }

-        return upgradedMergeSpecification(delegate.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer));
+        return delegate.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer);
     }

     @Override
     public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer)
         throws IOException {
-        return upgradedMergeSpecification(delegate.findForcedDeletesMerges(segmentInfos, writer));
+        return delegate.findForcedDeletesMerges(segmentInfos, writer);
     }

     @Override
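Outside the explicit upgrade path, ElasticsearchMergePolicy is now a plain delegating policy. A reduced sketch of the resulting shape (member names as in the diff above; the upgrade handling and other overrides are elided):

    public final class ElasticsearchMergePolicy extends MergePolicy {
        private final MergePolicy delegate;

        @Override
        public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, IndexWriter writer)
                throws IOException {
            return delegate.findMerges(mergeTrigger, segmentInfos, writer); // no upgrading wrapper any more
        }
    }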
@@ -18,45 +18,27 @@
  */
 package org.elasticsearch.common.lucene.uid;

-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.KeywordAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
 import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
-import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
-import org.hamcrest.MatcherAssert;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;

 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;

 public class VersionsTests extends ESTestCase {
@@ -83,13 +65,6 @@ public class VersionsTests extends ESTestCase {

-        Document doc = new Document();
-        doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
-        writer.addDocument(doc);
-        directoryReader = reopen(directoryReader);
-        assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_SET));
-        assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(Versions.NOT_SET));
-
-        doc = new Document();
+        Document doc = new Document();
         doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
         doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
         writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
         directoryReader = reopen(directoryReader);
@@ -166,135 +141,6 @@ public class VersionsTests extends ESTestCase {
         dir.close();
     }

-    public void testBackwardCompatibility() throws IOException {
-        Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
-
-        DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1));
-        MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
-
-        Document doc = new Document();
-        UidField uidAndVersion = new UidField("1", 1L);
-        doc.add(uidAndVersion);
-        writer.addDocument(doc);
-
-        uidAndVersion.uid = "2";
-        uidAndVersion.version = 2;
-        writer.addDocument(doc);
-        writer.commit();
-
-        directoryReader = reopen(directoryReader);
-        assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
-        assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2L));
-        assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "3")), equalTo(Versions.NOT_FOUND));
-        directoryReader.close();
-        writer.close();
-        dir.close();
-    }
-
-    // This is how versions used to be encoded
-    private static class UidField extends Field {
-        private static final FieldType FIELD_TYPE = new FieldType();
-        static {
-            FIELD_TYPE.setTokenized(true);
-            FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-            FIELD_TYPE.setStored(true);
-            FIELD_TYPE.freeze();
-        }
-        String uid;
-        long version;
-        UidField(String uid, long version) {
-            super(UidFieldMapper.NAME, uid, FIELD_TYPE);
-            this.uid = uid;
-            this.version = version;
-        }
-        @Override
-        public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
-            return new TokenStream() {
-                boolean finished = true;
-                final CharTermAttribute term = addAttribute(CharTermAttribute.class);
-                final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
-                @Override
-                public boolean incrementToken() throws IOException {
-                    if (finished) {
-                        return false;
-                    }
-                    term.setEmpty().append(uid);
-                    payload.setPayload(new BytesRef(Numbers.longToBytes(version)));
-                    finished = true;
-                    return true;
-                }
-                @Override
-                public void reset() throws IOException {
-                    finished = false;
-                }
-            };
-        }
-    }
-
-    public void testMergingOldIndices() throws Exception {
-        final IndexWriterConfig iwConf = new IndexWriterConfig(new KeywordAnalyzer());
-        iwConf.setMergePolicy(new ElasticsearchMergePolicy(iwConf.getMergePolicy()));
-        final Directory dir = newDirectory();
-        final IndexWriter iw = new IndexWriter(dir, iwConf);
-
-        // 1st segment, no _version
-        Document document = new Document();
-        // Add a dummy field (enough to trigger #3237)
-        document.add(new StringField("a", "b", Store.NO));
-        StringField uid = new StringField(UidFieldMapper.NAME, "1", Store.YES);
-        document.add(uid);
-        iw.addDocument(document);
-        uid.setStringValue("2");
-        iw.addDocument(document);
-        iw.commit();
-
-        // 2nd segment, old layout
-        document = new Document();
-        UidField uidAndVersion = new UidField("3", 3L);
-        document.add(uidAndVersion);
-        iw.addDocument(document);
-        uidAndVersion.uid = "4";
-        uidAndVersion.version = 4L;
-        iw.addDocument(document);
-        iw.commit();
-
-        // 3rd segment new layout
-        document = new Document();
-        uid.setStringValue("5");
-        Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
-        document.add(uid);
-        document.add(version);
-        iw.addDocument(document);
-        uid.setStringValue("6");
-        version.setLongValue(6L);
-        iw.addDocument(document);
-        iw.commit();
-
-        Map<String, Long> expectedVersions = new HashMap<>();
-        expectedVersions.put("1", 0L);
-        expectedVersions.put("2", 0L);
-        expectedVersions.put("3", 0L);
-        expectedVersions.put("4", 4L);
-        expectedVersions.put("5", 5L);
-        expectedVersions.put("6", 6L);
-
-        // Force merge and check versions
-        iw.forceMerge(1, true);
-        final LeafReader ir = SlowCompositeReaderWrapper.wrap(ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw.getDirectory()), new ShardId("foo", "_na_", 1)));
-        final NumericDocValues versions = ir.getNumericDocValues(VersionFieldMapper.NAME);
-        assertThat(versions, notNullValue());
-        for (int i = 0; i < ir.maxDoc(); ++i) {
-            final String uidValue = ir.document(i).get(UidFieldMapper.NAME);
-            final long expectedVersion = expectedVersions.get(uidValue);
-            assertThat(versions.get(i), equalTo(expectedVersion));
-        }
-
-        iw.close();
-        ir.close();
-        dir.close();
-    }
-
     /** Test that version map cache works, is evicted on close, etc */
     public void testCache() throws Exception {
         int size = Versions.lookupStates.size();
@@ -28,11 +28,6 @@ public class VersionTypeTests extends ESTestCase {
     public void testInternalVersionConflict() throws Exception {
         assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean()));
         assertFalse(VersionType.INTERNAL.isVersionConflictForReads(10, Versions.MATCH_ANY));
-        // if we don't have a version in the index we accept everything
-        assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean()));
-        assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_SET, 10));
-        assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, Versions.MATCH_ANY, randomBoolean()));
-        assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_SET, Versions.MATCH_ANY));

         // if we didn't find a version (but the index does support it), we don't like it unless MATCH_ANY
         assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
@@ -99,7 +94,6 @@ public class VersionTypeTests extends ESTestCase {

     public void testExternalVersionConflict() throws Exception {
         assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
-        assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean()));
         // MATCH_ANY must throw an exception in the case of external version, as the version must be set! it used as the new value
         assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean()));

@@ -135,7 +129,6 @@ public class VersionTypeTests extends ESTestCase {

     public void testExternalGTEVersionConflict() throws Exception {
         assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
-        assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean()));
         // MATCH_ANY must throw an exception in the case of external version, as the version must be set! it used as the new value
         assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, Versions.MATCH_ANY, randomBoolean()));

@@ -162,7 +155,6 @@ public class VersionTypeTests extends ESTestCase {

     public void testForceVersionConflict() throws Exception {
         assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10, randomBoolean()));
-        assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_SET, 10, randomBoolean()));

         // MATCH_ANY must throw an exception in the case of force version, as the version must be set! it used as the new value
         try {
@@ -192,22 +184,18 @@ public class VersionTypeTests extends ESTestCase {
     }

     public void testUpdateVersion() {
-        assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(1L));
         assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(1L));
         assertThat(VersionType.INTERNAL.updateVersion(1, 1), equalTo(2L));
         assertThat(VersionType.INTERNAL.updateVersion(2, Versions.MATCH_ANY), equalTo(3L));


-        assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(10L));
         assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(10L));
         assertThat(VersionType.EXTERNAL.updateVersion(1, 10), equalTo(10L));

-        assertThat(VersionType.EXTERNAL_GTE.updateVersion(Versions.NOT_SET, 10), equalTo(10L));
         assertThat(VersionType.EXTERNAL_GTE.updateVersion(Versions.NOT_FOUND, 10), equalTo(10L));
         assertThat(VersionType.EXTERNAL_GTE.updateVersion(1, 10), equalTo(10L));
         assertThat(VersionType.EXTERNAL_GTE.updateVersion(10, 10), equalTo(10L));

-        assertThat(VersionType.FORCE.updateVersion(Versions.NOT_SET, 10), equalTo(10L));
         assertThat(VersionType.FORCE.updateVersion(Versions.NOT_FOUND, 10), equalTo(10L));
         assertThat(VersionType.FORCE.updateVersion(11, 10), equalTo(10L));
