Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-05 20:48:22 +00:00)
Remove support for pre 2.0 indices
This commit removes all index-level compatibility and upgrade paths for pre-2.0 indices. This includes:

* Remove the leftover delete_by_query support used to replay translog records. All pending delete_by_query instances are applied during the upgrade to 2.x, so the bwc layer can be removed in 3.x.
* Remove Elasticsearch090PostingsFormat - we maintained our own postings format until 2.0; it is removed now since users must upgrade to 2.x before moving to 3.0.
* Remove BloomFilterPostingsFormat - this was only used for ID fields in the Elasticsearch090PostingsFormat.
* Remove the upgrade methods for pre-2.0 translogs without checkpoints.
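For orientation, all of the removed compatibility paths hang off a single gate on the index creation version. The sketch below condenses the MetaDataIndexUpgradeService change from this diff (error message shortened; not the exact production code):

    // 3.0 only opens indices created on 2.0.0.beta1 or later (Lucene 5.x segments);
    // anything older must first be upgraded on a 2.x cluster.
    private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
        return indexMetaData.creationVersion().onOrAfter(Version.V_2_0_0_beta1);
    }

    private void checkSupportedVersion(IndexMetaData indexMetaData) {
        if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) {
            throw new IllegalStateException("The index [" + indexMetaData.getIndex()
                    + "] was created before v2.0.0.beta1 and wasn't upgraded.");
        }
    }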
This commit is contained in:
parent 260ba67866
commit 39891594f0
@@ -524,7 +524,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63),
DELETE_BY_QUERY_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class, org.elasticsearch.index.engine.DeleteByQueryFailedEngineException::new, 64),
// 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
@@ -165,13 +165,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}

/**
* Elasticsearch 2.0 no longer supports indices with pre Lucene v4.0 (Elasticsearch v 0.90.0) segments. All indices
* that were created before Elasticsearch v0.90.0 should be upgraded using upgrade plugin before they can
* Elasticsearch 3.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices
* that were created before Elasticsearch v2.0.0.beta1 should be upgraded using upgrade plugin before they can
* be open by this version of elasticsearch.
*/
private void checkSupportedVersion(IndexMetaData indexMetaData) {
if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) {
throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v0.90.0 and wasn't upgraded."
throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1 and wasn't upgraded."
+ " This index should be open using a version before " + Version.CURRENT.minimumCompatibilityVersion()
+ " and upgraded using the upgrade API.");
}
@@ -181,7 +181,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
* Returns true if this index can be supported by the current version of elasticsearch
*/
private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
if (indexMetaData.creationVersion().onOrAfter(Version.V_0_90_0_Beta1)) {
if (indexMetaData.creationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
// The index was created with elasticsearch that was using Lucene 4.0
return true;
}
@ -1,440 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.codec.postingsformat;
|
||||
|
||||
import org.apache.lucene.codecs.*;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
import org.apache.lucene.store.*;
|
||||
import org.apache.lucene.util.*;
|
||||
import org.elasticsearch.common.util.BloomFilter;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* A {@link PostingsFormat} useful for low doc-frequency fields such as primary
|
||||
* keys. Bloom filters are maintained in a ".blm" file which offers "fast-fail"
|
||||
* for reads in segments known to have no record of the key. A choice of
|
||||
* delegate PostingsFormat is used to record all other Postings data.
|
||||
* </p>
|
||||
* <p>
|
||||
* This is a special bloom filter version, based on {@link org.elasticsearch.common.util.BloomFilter} and inspired
|
||||
* by Lucene {@code org.apache.lucene.codecs.bloom.BloomFilteringPostingsFormat}.
|
||||
* @deprecated only for reading old segments
|
||||
*/
|
||||
@Deprecated
|
||||
public class BloomFilterPostingsFormat extends PostingsFormat {
|
||||
|
||||
public static final String BLOOM_CODEC_NAME = "XBloomFilter"; // the Lucene one is named BloomFilter
|
||||
public static final int BLOOM_CODEC_VERSION = 1;
|
||||
public static final int BLOOM_CODEC_VERSION_CHECKSUM = 2;
|
||||
public static final int BLOOM_CODEC_VERSION_CURRENT = BLOOM_CODEC_VERSION_CHECKSUM;
|
||||
|
||||
/**
|
||||
* Extension of Bloom Filters file
|
||||
*/
|
||||
static final String BLOOM_EXTENSION = "blm";
|
||||
|
||||
private BloomFilter.Factory bloomFilterFactory = BloomFilter.Factory.DEFAULT;
|
||||
private PostingsFormat delegatePostingsFormat;
|
||||
|
||||
/**
|
||||
* Creates Bloom filters for a selection of fields created in the index. This
|
||||
* is recorded as a set of Bitsets held as a segment summary in an additional
|
||||
* "blm" file. This PostingsFormat delegates to a choice of delegate
|
||||
* PostingsFormat for encoding all other postings data.
|
||||
*
|
||||
* @param delegatePostingsFormat The PostingsFormat that records all the non-bloom filter data i.e.
|
||||
* postings info.
|
||||
* @param bloomFilterFactory The {@link org.elasticsearch.common.util.BloomFilter.Factory} responsible for sizing BloomFilters
|
||||
* appropriately
|
||||
*/
|
||||
public BloomFilterPostingsFormat(PostingsFormat delegatePostingsFormat,
|
||||
BloomFilter.Factory bloomFilterFactory) {
|
||||
super(BLOOM_CODEC_NAME);
|
||||
this.delegatePostingsFormat = delegatePostingsFormat;
|
||||
this.bloomFilterFactory = bloomFilterFactory;
|
||||
}
|
||||
|
||||
// Used only by core Lucene at read-time via Service Provider instantiation -
|
||||
// do not use at Write-time in application code.
|
||||
public BloomFilterPostingsFormat() {
|
||||
super(BLOOM_CODEC_NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
||||
throw new UnsupportedOperationException("this codec can only be used for reading");
|
||||
}
|
||||
|
||||
@Override
|
||||
public BloomFilteredFieldsProducer fieldsProducer(SegmentReadState state)
|
||||
throws IOException {
|
||||
return new BloomFilteredFieldsProducer(state);
|
||||
}
|
||||
|
||||
public PostingsFormat getDelegate() {
|
||||
return delegatePostingsFormat;
|
||||
}
|
||||
|
||||
private final class LazyBloomLoader implements Accountable {
|
||||
private final long offset;
|
||||
private final IndexInput indexInput;
|
||||
private BloomFilter filter;
|
||||
|
||||
private LazyBloomLoader(long offset, IndexInput origial) {
|
||||
this.offset = offset;
|
||||
this.indexInput = origial.clone();
|
||||
}
|
||||
|
||||
synchronized BloomFilter get() throws IOException {
|
||||
if (filter == null) {
|
||||
try (final IndexInput input = indexInput) {
|
||||
input.seek(offset);
|
||||
this.filter = BloomFilter.deserialize(input);
|
||||
}
|
||||
}
|
||||
return filter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return filter == null ? 0l : filter.getSizeInBytes();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
return Collections.singleton(Accountables.namedAccountable("bloom", ramBytesUsed()));
|
||||
}
|
||||
}
|
||||
|
||||
public final class BloomFilteredFieldsProducer extends FieldsProducer {
|
||||
private FieldsProducer delegateFieldsProducer;
|
||||
HashMap<String, LazyBloomLoader> bloomsByFieldName = new HashMap<>();
|
||||
private final int version;
|
||||
private final IndexInput data;
|
||||
|
||||
// for internal use only
|
||||
FieldsProducer getDelegate() {
|
||||
return delegateFieldsProducer;
|
||||
}
|
||||
|
||||
public BloomFilteredFieldsProducer(SegmentReadState state)
|
||||
throws IOException {
|
||||
|
||||
final String bloomFileName = IndexFileNames.segmentFileName(
|
||||
state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION);
|
||||
final Directory directory = state.directory;
|
||||
IndexInput dataInput = directory.openInput(bloomFileName, state.context);
|
||||
try {
|
||||
ChecksumIndexInput bloomIn = new BufferedChecksumIndexInput(dataInput.clone());
|
||||
version = CodecUtil.checkHeader(bloomIn, BLOOM_CODEC_NAME, BLOOM_CODEC_VERSION,
|
||||
BLOOM_CODEC_VERSION_CURRENT);
|
||||
// // Load the hash function used in the BloomFilter
|
||||
// hashFunction = HashFunction.forName(bloomIn.readString());
|
||||
// Load the delegate postings format
|
||||
final String delegatePostings = bloomIn.readString();
|
||||
this.delegateFieldsProducer = PostingsFormat.forName(delegatePostings)
|
||||
.fieldsProducer(state);
|
||||
this.data = dataInput;
|
||||
dataInput = null; // null it out such that we don't close it
|
||||
} finally {
|
||||
IOUtils.closeWhileHandlingException(dataInput);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<String> iterator() {
|
||||
return delegateFieldsProducer.iterator();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
IOUtils.close(data, delegateFieldsProducer);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Terms terms(String field) throws IOException {
|
||||
LazyBloomLoader filter = bloomsByFieldName.get(field);
|
||||
if (filter == null) {
|
||||
return delegateFieldsProducer.terms(field);
|
||||
} else {
|
||||
Terms result = delegateFieldsProducer.terms(field);
|
||||
if (result == null) {
|
||||
return null;
|
||||
}
|
||||
return new BloomFilteredTerms(result, filter.get());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
return delegateFieldsProducer.size();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
long size = delegateFieldsProducer.ramBytesUsed();
|
||||
for (LazyBloomLoader bloomFilter : bloomsByFieldName.values()) {
|
||||
size += bloomFilter.ramBytesUsed();
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
List<Accountable> resources = new ArrayList<>();
|
||||
resources.addAll(Accountables.namedAccountables("field", bloomsByFieldName));
|
||||
if (delegateFieldsProducer != null) {
|
||||
resources.add(Accountables.namedAccountable("delegate", delegateFieldsProducer));
|
||||
}
|
||||
return Collections.unmodifiableList(resources);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void checkIntegrity() throws IOException {
|
||||
delegateFieldsProducer.checkIntegrity();
|
||||
if (version >= BLOOM_CODEC_VERSION_CHECKSUM) {
|
||||
CodecUtil.checksumEntireFile(data);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldsProducer getMergeInstance() throws IOException {
|
||||
return delegateFieldsProducer.getMergeInstance();
|
||||
}
|
||||
}
|
||||
|
||||
public static final class BloomFilteredTerms extends FilterLeafReader.FilterTerms {
|
||||
private BloomFilter filter;
|
||||
|
||||
public BloomFilteredTerms(Terms terms, BloomFilter filter) {
|
||||
super(terms);
|
||||
this.filter = filter;
|
||||
}
|
||||
|
||||
public BloomFilter getFilter() {
|
||||
return filter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TermsEnum iterator() throws IOException {
|
||||
return new BloomFilteredTermsEnum(this.in, filter);
|
||||
}
|
||||
}
|
||||
|
||||
static final class BloomFilteredTermsEnum extends TermsEnum {
|
||||
|
||||
private Terms delegateTerms;
|
||||
private TermsEnum delegateTermsEnum;
|
||||
private BloomFilter filter;
|
||||
|
||||
public BloomFilteredTermsEnum(Terms other, BloomFilter filter) {
|
||||
this.delegateTerms = other;
|
||||
this.filter = filter;
|
||||
}
|
||||
|
||||
void reset(Terms others) {
|
||||
this.delegateTermsEnum = null;
|
||||
this.delegateTerms = others;
|
||||
}
|
||||
|
||||
private TermsEnum getDelegate() throws IOException {
|
||||
if (delegateTermsEnum == null) {
|
||||
/* pull the iterator only if we really need it -
|
||||
* this can be a relatively heavy operation depending on the
|
||||
* delegate postings format and they underlying directory
|
||||
* (clone IndexInput) */
|
||||
delegateTermsEnum = delegateTerms.iterator();
|
||||
}
|
||||
return delegateTermsEnum;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final BytesRef next() throws IOException {
|
||||
return getDelegate().next();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean seekExact(BytesRef text)
|
||||
throws IOException {
|
||||
// The magical fail-fast speed up that is the entire point of all of
|
||||
// this code - save a disk seek if there is a match on an in-memory
|
||||
// structure
|
||||
// that may occasionally give a false positive but guaranteed no false
|
||||
// negatives
|
||||
if (!filter.mightContain(text)) {
|
||||
return false;
|
||||
}
|
||||
return getDelegate().seekExact(text);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final SeekStatus seekCeil(BytesRef text)
|
||||
throws IOException {
|
||||
return getDelegate().seekCeil(text);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void seekExact(long ord) throws IOException {
|
||||
getDelegate().seekExact(ord);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final BytesRef term() throws IOException {
|
||||
return getDelegate().term();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long ord() throws IOException {
|
||||
return getDelegate().ord();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int docFreq() throws IOException {
|
||||
return getDelegate().docFreq();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long totalTermFreq() throws IOException {
|
||||
return getDelegate().totalTermFreq();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
|
||||
return getDelegate().postings(reuse, flags);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: would be great to move this out to test code, but the interaction between es090 and bloom is complex
|
||||
// at least it is not accessible via SPI
|
||||
public final class BloomFilteredFieldsConsumer extends FieldsConsumer {
|
||||
private final FieldsConsumer delegateFieldsConsumer;
|
||||
private final Map<FieldInfo, BloomFilter> bloomFilters = new HashMap<>();
|
||||
private final SegmentWriteState state;
|
||||
private boolean closed = false;
|
||||
|
||||
// private PostingsFormat delegatePostingsFormat;
|
||||
|
||||
public BloomFilteredFieldsConsumer(FieldsConsumer fieldsConsumer,
|
||||
SegmentWriteState state, PostingsFormat delegatePostingsFormat) {
|
||||
this.delegateFieldsConsumer = fieldsConsumer;
|
||||
// this.delegatePostingsFormat=delegatePostingsFormat;
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
// for internal use only
|
||||
public FieldsConsumer getDelegate() {
|
||||
return delegateFieldsConsumer;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void write(Fields fields) throws IOException {
|
||||
|
||||
// Delegate must write first: it may have opened files
|
||||
// on creating the class
|
||||
// (e.g. Lucene41PostingsConsumer), and write() will
|
||||
// close them; alternatively, if we delayed pulling
|
||||
// the fields consumer until here, we could do it
|
||||
// afterwards:
|
||||
delegateFieldsConsumer.write(fields);
|
||||
|
||||
for(String field : fields) {
|
||||
Terms terms = fields.terms(field);
|
||||
if (terms == null) {
|
||||
continue;
|
||||
}
|
||||
FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
|
||||
BloomFilter bloomFilter = null;
|
||||
|
||||
PostingsEnum postings = null;
|
||||
while (true) {
|
||||
BytesRef term = termsEnum.next();
|
||||
if (term == null) {
|
||||
break;
|
||||
}
|
||||
if (bloomFilter == null) {
|
||||
bloomFilter = bloomFilterFactory.createFilter(state.segmentInfo.maxDoc());
|
||||
assert bloomFilters.containsKey(field) == false;
|
||||
bloomFilters.put(fieldInfo, bloomFilter);
|
||||
}
|
||||
// Make sure there's at least one doc for this term:
|
||||
postings = termsEnum.postings(postings, 0);
|
||||
if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
|
||||
bloomFilter.put(term);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
closed = true;
|
||||
delegateFieldsConsumer.close();
|
||||
// Now we are done accumulating values for these fields
|
||||
List<Entry<FieldInfo, BloomFilter>> nonSaturatedBlooms = new ArrayList<>();
|
||||
|
||||
for (Entry<FieldInfo, BloomFilter> entry : bloomFilters.entrySet()) {
|
||||
nonSaturatedBlooms.add(entry);
|
||||
}
|
||||
String bloomFileName = IndexFileNames.segmentFileName(
|
||||
state.segmentInfo.name, state.segmentSuffix, BLOOM_EXTENSION);
|
||||
IndexOutput bloomOutput = null;
|
||||
try {
|
||||
bloomOutput = state.directory
|
||||
.createOutput(bloomFileName, state.context);
|
||||
CodecUtil.writeHeader(bloomOutput, BLOOM_CODEC_NAME,
|
||||
BLOOM_CODEC_VERSION_CURRENT);
|
||||
// remember the name of the postings format we will delegate to
|
||||
bloomOutput.writeString(delegatePostingsFormat.getName());
|
||||
|
||||
// First field in the output file is the number of fields+blooms saved
|
||||
bloomOutput.writeInt(nonSaturatedBlooms.size());
|
||||
for (Entry<FieldInfo, BloomFilter> entry : nonSaturatedBlooms) {
|
||||
FieldInfo fieldInfo = entry.getKey();
|
||||
BloomFilter bloomFilter = entry.getValue();
|
||||
bloomOutput.writeInt(fieldInfo.number);
|
||||
saveAppropriatelySizedBloomFilter(bloomOutput, bloomFilter, fieldInfo);
|
||||
}
|
||||
CodecUtil.writeFooter(bloomOutput);
|
||||
} finally {
|
||||
IOUtils.close(bloomOutput);
|
||||
}
|
||||
//We are done with large bitsets so no need to keep them hanging around
|
||||
bloomFilters.clear();
|
||||
}
|
||||
|
||||
private void saveAppropriatelySizedBloomFilter(IndexOutput bloomOutput,
|
||||
BloomFilter bloomFilter, FieldInfo fieldInfo) throws IOException {
|
||||
BloomFilter.serilaize(bloomFilter, bloomOutput);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
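The class removed above exists purely for the fast-fail read path on low doc-frequency fields such as _uid. Stripped of the codec plumbing, the idea reduces to a minimal sketch like the following (simplified signature, not part of the removed class):

    // Consult the in-memory bloom filter before touching the on-disk terms dictionary.
    // A false positive costs one wasted seek; false negatives cannot happen, so a miss is a safe skip.
    static boolean idExists(BytesRef id, BloomFilter filter, TermsEnum onDiskTerms) throws IOException {
        if (filter.mightContain(id) == false) {
            return false;                      // definitely absent: no disk seek
        }
        return onDiskTerms.seekExact(id);      // possibly present: do the real lookup
    }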
@ -1,72 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.codec.postingsformat;
|
||||
|
||||
import org.apache.lucene.codecs.FieldsConsumer;
|
||||
import org.apache.lucene.codecs.FieldsProducer;
|
||||
import org.apache.lucene.codecs.PostingsFormat;
|
||||
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
|
||||
import org.apache.lucene.index.SegmentReadState;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.util.BloomFilter;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* This is the old default postings format for Elasticsearch that special cases
|
||||
* the <tt>_uid</tt> field to use a bloom filter while all other fields
|
||||
* will use a {@link Lucene50PostingsFormat}. This format will reuse the underlying
|
||||
* {@link Lucene50PostingsFormat} and its files also for the <tt>_uid</tt> saving up to
|
||||
* 5 files per segment in the default case.
|
||||
* <p>
|
||||
* @deprecated only for reading old segments
|
||||
*/
|
||||
@Deprecated
|
||||
public class Elasticsearch090PostingsFormat extends PostingsFormat {
|
||||
protected final BloomFilterPostingsFormat bloomPostings;
|
||||
|
||||
public Elasticsearch090PostingsFormat() {
|
||||
super("es090");
|
||||
Lucene50PostingsFormat delegate = new Lucene50PostingsFormat();
|
||||
assert delegate.getName().equals(Lucene.LATEST_POSTINGS_FORMAT);
|
||||
bloomPostings = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT);
|
||||
}
|
||||
|
||||
public PostingsFormat getDefaultWrapped() {
|
||||
return bloomPostings.getDelegate();
|
||||
}
|
||||
|
||||
protected static final Predicate<String> UID_FIELD_FILTER = field -> UidFieldMapper.NAME.equals(field);
|
||||
|
||||
@Override
|
||||
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
||||
throw new UnsupportedOperationException("this codec can only be used for reading");
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
|
||||
// we can just return the delegate here since we didn't record bloom filters for
|
||||
// the other fields.
|
||||
return bloomPostings.fieldsProducer(state);
|
||||
}
|
||||
|
||||
}
|
@ -1,38 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.engine;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */
|
||||
@Deprecated
|
||||
public class DeleteByQueryFailedEngineException extends EngineException {
|
||||
|
||||
public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) {
|
||||
super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause);
|
||||
}
|
||||
|
||||
public DeleteByQueryFailedEngineException(StreamInput in) throws IOException{
|
||||
super(in);
|
||||
}
|
||||
}
|
@@ -206,10 +206,6 @@ public abstract class Engine implements Closeable {

public abstract void delete(Delete delete) throws EngineException;

/** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
@Deprecated
public abstract void delete(DeleteByQuery delete) throws EngineException;

/**
* Attempts to do a special commit where the given syncID is put into the commit data. The attempt
* succeeds if there are not pending writes in lucene and the current point is equal to the expected one.
@ -21,8 +21,9 @@ package org.elasticsearch.index.engine;
|
||||
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.SearcherFactory;
|
||||
import org.apache.lucene.search.SearcherManager;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
@ -48,7 +49,6 @@ import org.elasticsearch.index.indexing.ShardIndexingService;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.merge.MergeStats;
|
||||
import org.elasticsearch.index.merge.OnGoingMerge;
|
||||
import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery;
|
||||
import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
|
||||
import org.elasticsearch.index.shard.MergeSchedulerConfig;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
@@ -182,8 +182,7 @@ public class InternalEngine extends Engine {
}
translogConfig.setTranslogGeneration(generation);
if (generation != null && generation.translogUUID == null) {
// only upgrade on pre-2.0 indices...
Translog.upgradeLegacyTranslog(logger, translogConfig);
throw new IndexFormatTooOldException("trasnlog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first");
}
}
final Translog translog = new Translog(translogConfig);
@ -512,48 +511,6 @@ public class InternalEngine extends Engine {
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
|
||||
@Deprecated
|
||||
@Override
|
||||
public void delete(DeleteByQuery delete) throws EngineException {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (delete.origin() == Operation.Origin.RECOVERY) {
|
||||
// Don't throttle recovery operations
|
||||
innerDelete(delete);
|
||||
} else {
|
||||
try (Releasable r = throttle.acquireThrottle()) {
|
||||
innerDelete(delete);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void innerDelete(DeleteByQuery delete) throws EngineException {
|
||||
try {
|
||||
Query query = delete.query();
|
||||
if (delete.aliasFilter() != null) {
|
||||
query = new BooleanQuery.Builder()
|
||||
.add(query, Occur.MUST)
|
||||
.add(delete.aliasFilter(), Occur.FILTER)
|
||||
.build();
|
||||
}
|
||||
if (delete.nested()) {
|
||||
query = new IncludeNestedDocsQuery(query, delete.parentFilter());
|
||||
}
|
||||
|
||||
indexWriter.deleteDocuments(query);
|
||||
translog.add(new Translog.DeleteByQuery(delete));
|
||||
} catch (Throwable t) {
|
||||
maybeFailEngine("delete_by_query", t);
|
||||
throw new DeleteByQueryFailedEngineException(shardId, delete, t);
|
||||
}
|
||||
|
||||
// TODO: This is heavy, since we refresh, but we must do this because we don't know which documents were in fact deleted (i.e., our
|
||||
// versionMap isn't updated), so we must force a cutover to a new reader to "see" the deletions:
|
||||
refresh("delete_by_query");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void refresh(String source) throws EngineException {
|
||||
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
|
||||
|
@@ -112,13 +112,6 @@ public class ShadowEngine extends Engine {
throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine");
}

/** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
@Deprecated
@Override
public void delete(DeleteByQuery delete) throws EngineException {
throw new UnsupportedOperationException(shardId + " delete-by-query operation not allowed on shadow engine");
}

@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) {
throw new UnsupportedOperationException(shardId + " sync commit operation not allowed on shadow engine");
@ -124,12 +124,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
private final MapperService mapperService;
|
||||
private final IndexQueryParserService queryParserService;
|
||||
private final IndexCache indexCache;
|
||||
private final InternalIndicesLifecycle indicesLifecycle;
|
||||
private final Store store;
|
||||
private final MergeSchedulerConfig mergeSchedulerConfig;
|
||||
private final IndexAliasesService indexAliasesService;
|
||||
private final ShardIndexingService indexingService;
|
||||
private final ShardSearchStats searchService;
|
||||
private final ShardGetService getService;
|
||||
@ -211,11 +209,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
|
||||
this.indicesLifecycle = (InternalIndicesLifecycle) provider.getIndicesLifecycle();
|
||||
this.store = store;
|
||||
this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings);
|
||||
this.threadPool = provider.getThreadPool();
|
||||
this.mapperService = provider.getMapperService();
|
||||
this.queryParserService = provider.getQueryParserService();
|
||||
this.indexCache = provider.getIndexCache();
|
||||
this.indexAliasesService = provider.getIndexAliasesService();
|
||||
this.threadPool = provider.getThreadPool();
|
||||
this.mapperService = provider.getMapperService();
|
||||
this.indexCache = provider.getIndexCache();
|
||||
this.indexingService = new ShardIndexingService(shardId, indexSettings);
|
||||
this.getService = new ShardGetService(this, mapperService);
|
||||
this.termVectorsService = provider.getTermVectorsService();
|
||||
@ -254,7 +250,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
|
||||
this.indexingMemoryController = provider.getIndexingMemoryController();
|
||||
|
||||
this.searcherWrapper = provider.getIndexSearcherWrapper();
|
||||
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, mapperService, indexFieldDataService);
|
||||
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, provider.getQueryParserService(), indexingService, mapperService, indexFieldDataService);
|
||||
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
|
||||
percolatorQueriesRegistry.enableRealTimePercolator();
|
||||
}
|
||||
@ -1442,8 +1438,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
|
||||
}
|
||||
|
||||
private final EngineConfig newEngineConfig(TranslogConfig translogConfig, QueryCachingPolicy cachingPolicy) {
|
||||
final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(shardId, mapperService, queryParserService,
|
||||
indexAliasesService, indexCache, logger) {
|
||||
final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(shardId, mapperService, logger) {
|
||||
@Override
|
||||
protected void operationProcessed() {
|
||||
assert recoveryState != null;
|
||||
|
@ -18,27 +18,13 @@
|
||||
*/
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.join.BitSetProducer;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.aliases.IndexAliasesService;
|
||||
import org.elasticsearch.index.cache.IndexCache;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException;
|
||||
import org.elasticsearch.index.mapper.*;
|
||||
import org.elasticsearch.index.query.IndexQueryParserService;
|
||||
import org.elasticsearch.index.query.ParsedQuery;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.IOException;
|
||||
@ -53,20 +39,13 @@ import static org.elasticsearch.index.mapper.SourceToParse.source;
|
||||
*/
|
||||
public class TranslogRecoveryPerformer {
|
||||
private final MapperService mapperService;
|
||||
private final IndexQueryParserService queryParserService;
|
||||
private final IndexAliasesService indexAliasesService;
|
||||
private final IndexCache indexCache;
|
||||
private final ESLogger logger;
|
||||
private final Map<String, Mapping> recoveredTypes = new HashMap<>();
|
||||
private final ShardId shardId;
|
||||
|
||||
protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, IndexQueryParserService queryParserService,
|
||||
IndexAliasesService indexAliasesService, IndexCache indexCache, ESLogger logger) {
|
||||
protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, ESLogger logger) {
|
||||
this.shardId = shardId;
|
||||
this.mapperService = mapperService;
|
||||
this.queryParserService = queryParserService;
|
||||
this.indexAliasesService = indexAliasesService;
|
||||
this.indexCache = indexCache;
|
||||
this.logger = logger;
|
||||
}
|
||||
|
||||
@ -165,11 +144,6 @@ public class TranslogRecoveryPerformer {
|
||||
engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(),
|
||||
delete.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY, System.nanoTime(), false));
|
||||
break;
|
||||
case DELETE_BY_QUERY:
|
||||
Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
|
||||
engine.delete(prepareDeleteByQuery(queryParserService, mapperService, indexAliasesService, indexCache,
|
||||
deleteByQuery.source(), deleteByQuery.filteringAliases(), Engine.Operation.Origin.RECOVERY, deleteByQuery.types()));
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("No operation defined for [" + operation + "]");
|
||||
}
|
||||
@ -194,38 +168,6 @@ public class TranslogRecoveryPerformer {
|
||||
operationProcessed();
|
||||
}
|
||||
|
||||
private static Engine.DeleteByQuery prepareDeleteByQuery(IndexQueryParserService queryParserService, MapperService mapperService, IndexAliasesService indexAliasesService, IndexCache indexCache, BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) {
|
||||
long startTime = System.nanoTime();
|
||||
if (types == null) {
|
||||
types = Strings.EMPTY_ARRAY;
|
||||
}
|
||||
Query query;
|
||||
try {
|
||||
query = queryParserService.parseQuery(source).query();
|
||||
} catch (ParsingException ex) {
|
||||
// for BWC we try to parse directly the query since pre 1.0.0.Beta2 we didn't require a top level query field
|
||||
if (queryParserService.getIndexCreatedVersion().onOrBefore(Version.V_1_0_0_Beta2)) {
|
||||
try {
|
||||
XContentParser parser = XContentHelper.createParser(source);
|
||||
ParsedQuery parse = queryParserService.parse(parser);
|
||||
query = parse.query();
|
||||
} catch (Throwable t) {
|
||||
ex.addSuppressed(t);
|
||||
throw ex;
|
||||
}
|
||||
} else {
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
Query searchFilter = mapperService.searchFilter(types);
|
||||
if (searchFilter != null) {
|
||||
query = Queries.filtered(query, searchFilter);
|
||||
}
|
||||
|
||||
Query aliasFilter = indexAliasesService.aliasFilter(filteringAliases);
|
||||
BitSetProducer parentFilter = mapperService.hasNested() ? indexCache.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()) : null;
|
||||
return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, origin, startTime, types);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called once for every processed operation by this recovery performer.
|
||||
|
@ -23,11 +23,9 @@ import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.TwoPhaseCommit;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
@ -38,7 +36,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
@ -54,7 +51,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.EOFException;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.nio.channels.FileChannel;
|
||||
import java.nio.file.*;
|
||||
@ -189,99 +185,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
||||
}
|
||||
}
|
||||
|
||||
/**
* This method is used to upgrade a pre 2.0 translog structure to the new checkpoint based structure.
* The {@link org.elasticsearch.index.translog.Translog.TranslogGeneration} in the given config is
* used to determine the smallest file generation to upgrade. The procedure will traverse the translog
* directory to find all files that have a generation greater than or equal to the translog generation and
* renames the files to the new <tt>.tlog</tt> file format.
* <p>
* For each of the files a <tt>${filename}.ckp</tt>
* file is written containing the size of the translog in bytes, its ID and the number of operations. Since
* these files all rely on the pre 2.0 truncation feature where we read operations until hitting an {@link EOFException},
* the number of operations is recorded as <tt>-1</tt>. Later, once these files are opened for reading, legacy readers will
* allow for an unknown number of operations and mimic the old behavior.
* </p>
*/
public static void upgradeLegacyTranslog(ESLogger logger, TranslogConfig config) throws IOException {
|
||||
Path translogPath = config.getTranslogPath();
|
||||
TranslogGeneration translogGeneration = config.getTranslogGeneration();
|
||||
if (translogGeneration == null) {
|
||||
throw new IllegalArgumentException("TranslogGeneration must be set in order to upgrade");
|
||||
}
|
||||
if (translogGeneration.translogUUID != null) {
|
||||
throw new IllegalArgumentException("TranslogGeneration has a non-null UUID - index must have already been upgraded");
|
||||
}
|
||||
try {
|
||||
if (Checkpoint.read(translogPath.resolve(CHECKPOINT_FILE_NAME)) != null) {
|
||||
throw new IllegalStateException(CHECKPOINT_FILE_NAME + " file already present, translog is already upgraded");
|
||||
}
|
||||
} catch (NoSuchFileException | FileNotFoundException ex) {
|
||||
logger.debug("upgrading translog - no checkpoint found");
|
||||
}
|
||||
final Pattern parseLegacyIdPattern = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)((\\.recovering))?$"); // here we have to be lenient - nowhere else!
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(translogPath, new DirectoryStream.Filter<Path>() {
|
||||
@Override
|
||||
public boolean accept(Path entry) throws IOException {
|
||||
Matcher matcher = parseLegacyIdPattern.matcher(entry.getFileName().toString());
|
||||
if (matcher.matches() == false) {
|
||||
Matcher newIdMatcher = PARSE_STRICT_ID_PATTERN.matcher(entry.getFileName().toString());
|
||||
return newIdMatcher.matches();
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
})) {
|
||||
long latestGeneration = -1;
|
||||
List<PathWithGeneration> filesToUpgrade = new ArrayList<>();
|
||||
for (Path path : stream) {
|
||||
Matcher matcher = parseLegacyIdPattern.matcher(path.getFileName().toString());
|
||||
if (matcher.matches()) {
|
||||
long generation = Long.parseLong(matcher.group(1));
|
||||
if (generation >= translogGeneration.translogFileGeneration) {
|
||||
latestGeneration = Math.max(translogGeneration.translogFileGeneration, generation);
|
||||
}
|
||||
filesToUpgrade.add(new PathWithGeneration(path, generation));
|
||||
} else {
|
||||
Matcher strict_matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
|
||||
if (strict_matcher.matches()) {
|
||||
throw new IllegalStateException("non-legacy translog file [" + path.getFileName().toString() + "] found on a translog that wasn't upgraded yet");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (latestGeneration < translogGeneration.translogFileGeneration) {
|
||||
throw new IllegalStateException("latest found translog has a lower generation that the excepcted uncommitted " + translogGeneration.translogFileGeneration + " > " + latestGeneration);
|
||||
}
|
||||
CollectionUtil.timSort(filesToUpgrade, new Comparator<PathWithGeneration>() {
|
||||
@Override
|
||||
public int compare(PathWithGeneration o1, PathWithGeneration o2) {
|
||||
long gen1 = o1.getGeneration();
|
||||
long gen2 = o2.getGeneration();
|
||||
return Long.compare(gen1, gen2);
|
||||
}
|
||||
});
|
||||
for (PathWithGeneration pathAndGeneration : filesToUpgrade) {
|
||||
final Path path = pathAndGeneration.getPath();
|
||||
final long generation = pathAndGeneration.getGeneration();
|
||||
final Path target = path.resolveSibling(getFilename(generation));
|
||||
logger.debug("upgrading translog copy file from {} to {}", path, target);
|
||||
Files.move(path, target, StandardCopyOption.ATOMIC_MOVE);
|
||||
logger.debug("write commit point for {}", target);
|
||||
if (generation == latestGeneration) {
|
||||
// for the last one we only write a checkpoint not a real commit
|
||||
Checkpoint checkpoint = new Checkpoint(Files.size(translogPath.resolve(getFilename(latestGeneration))), -1, latestGeneration);
|
||||
Checkpoint.write(translogPath.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
|
||||
} else {
|
||||
Checkpoint checkpoint = new Checkpoint(Files.size(target), -1, generation);
|
||||
Checkpoint.write(translogPath.resolve(getCommitCheckpointFileName(generation)), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
|
||||
}
|
||||
}
|
||||
|
||||
IOUtils.fsync(translogPath, true);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/** recover all translog files found on disk */
|
||||
private ArrayList<ImmutableTranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
|
||||
boolean success = false;
|
||||
@@ -876,8 +779,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
@Deprecated
CREATE((byte) 1),
INDEX((byte) 2),
DELETE((byte) 3),
DELETE_BY_QUERY((byte) 4);
DELETE((byte) 3);

private final byte id;

@@ -897,8 +799,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return INDEX;
case 3:
return DELETE;
case 4:
return DELETE_BY_QUERY;
default:
throw new IllegalArgumentException("No type mapped for [" + id + "]");
}
@ -1232,137 +1132,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */
|
||||
@Deprecated
|
||||
public static class DeleteByQuery implements Operation {
|
||||
|
||||
public static final int SERIALIZATION_FORMAT = 2;
|
||||
private BytesReference source;
|
||||
@Nullable
|
||||
private String[] filteringAliases;
|
||||
private String[] types = Strings.EMPTY_ARRAY;
|
||||
|
||||
public DeleteByQuery() {
|
||||
}
|
||||
|
||||
public DeleteByQuery(Engine.DeleteByQuery deleteByQuery) {
|
||||
this(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types());
|
||||
}
|
||||
|
||||
public DeleteByQuery(BytesReference source, String[] filteringAliases, String... types) {
|
||||
this.source = source;
|
||||
this.types = types == null ? Strings.EMPTY_ARRAY : types;
|
||||
this.filteringAliases = filteringAliases;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Type opType() {
|
||||
return Type.DELETE_BY_QUERY;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long estimateSize() {
|
||||
return source.length() + 8;
|
||||
}
|
||||
|
||||
public BytesReference source() {
|
||||
return this.source;
|
||||
}
|
||||
|
||||
public String[] filteringAliases() {
|
||||
return filteringAliases;
|
||||
}
|
||||
|
||||
public String[] types() {
|
||||
return this.types;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Source getSource() {
|
||||
throw new IllegalStateException("trying to read doc source from delete_by_query operation");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
int version = in.readVInt(); // version
|
||||
source = in.readBytesReference();
|
||||
if (version < 2) {
|
||||
// for query_parser_name, which was removed
|
||||
if (in.readBoolean()) {
|
||||
in.readString();
|
||||
}
|
||||
}
|
||||
int typesSize = in.readVInt();
|
||||
if (typesSize > 0) {
|
||||
types = new String[typesSize];
|
||||
for (int i = 0; i < typesSize; i++) {
|
||||
types[i] = in.readString();
|
||||
}
|
||||
}
|
||||
if (version >= 1) {
|
||||
int aliasesSize = in.readVInt();
|
||||
if (aliasesSize > 0) {
|
||||
filteringAliases = new String[aliasesSize];
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
filteringAliases[i] = in.readString();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(SERIALIZATION_FORMAT);
|
||||
out.writeBytesReference(source);
|
||||
out.writeVInt(types.length);
|
||||
for (String type : types) {
|
||||
out.writeString(type);
|
||||
}
|
||||
if (filteringAliases != null) {
|
||||
out.writeVInt(filteringAliases.length);
|
||||
for (String alias : filteringAliases) {
|
||||
out.writeString(alias);
|
||||
}
|
||||
} else {
|
||||
out.writeVInt(0);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DeleteByQuery that = (DeleteByQuery) o;
|
||||
|
||||
if (!Arrays.equals(filteringAliases, that.filteringAliases)) {
|
||||
return false;
|
||||
}
|
||||
if (!Arrays.equals(types, that.types)) {
|
||||
return false;
|
||||
}
|
||||
return source.equals(that.source);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = source.hashCode();
|
||||
result = 31 * result + (filteringAliases != null ? Arrays.hashCode(filteringAliases) : 0);
|
||||
result = 31 * result + Arrays.hashCode(types);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "DeleteByQuery{" +
|
||||
"types=" + Arrays.toString(types) +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
public enum Durabilty {
|
||||
/**
|
||||
@ -1478,8 +1247,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
||||
return new Index();
|
||||
case DELETE:
|
||||
return new Translog.Delete();
|
||||
case DELETE_BY_QUERY:
|
||||
return new Translog.DeleteByQuery();
|
||||
case INDEX:
|
||||
return new Index();
|
||||
default:
|
||||
@ -1589,10 +1356,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
||||
return current.getFirstOperationOffset();
|
||||
}
|
||||
|
||||
List<ImmutableTranslogReader> getRecoveredReaders() { // for testing
|
||||
return this.recoveredTranslogs;
|
||||
}
|
||||
|
||||
private void ensureOpen() {
|
||||
if (closed.get()) {
|
||||
throw new AlreadyClosedException("translog is already closed");
|
||||
@ -1606,21 +1369,4 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
|
||||
return outstandingViews.size();
|
||||
}
|
||||
|
||||
private static class PathWithGeneration {
|
||||
private final Path path;
|
||||
private final long generation;
|
||||
|
||||
public PathWithGeneration(Path path, long generation) {
|
||||
this.path = path;
|
||||
this.generation = generation;
|
||||
}
|
||||
|
||||
public Path getPath() {
|
||||
return path;
|
||||
}
|
||||
|
||||
public long getGeneration() {
|
||||
return generation;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,3 +1 @@
org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat
org.elasticsearch.search.suggest.completion.Completion090PostingsFormat
org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat
org.elasticsearch.search.suggest.completion.Completion090PostingsFormat
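This services file is how Lucene's SPI machinery resolves postings formats by name, so dropping the two entries means segments written with "es090" or "XBloomFilter" can no longer be decoded by this version, which is the intent of the change. Illustrative lookup only:

    PostingsFormat fmt = PostingsFormat.forName("es090"); // fails once the SPI entry is removed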
@@ -246,7 +246,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
SortedSet<String> expectedVersions = new TreeSet<>();
for (Version v : VersionUtils.allVersions()) {
if (v.snapshot()) continue; // snapshots are unreleased, so there is no backcompat yet
if (v.onOrBefore(Version.V_0_20_6)) continue; // we can only test back one major lucene version
if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; // we can only test back one major lucene version
if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself
expectedVersions.add("index-" + v.toString() + ".zip");
}
@@ -312,7 +312,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
client().admin().indices().prepareOpen(indexName).get();
fail("Shouldn't be able to open an old index");
} catch (IllegalStateException ex) {
assertThat(ex.getMessage(), containsString("was created before v0.90.0 and wasn't upgraded"));
assertThat(ex.getMessage(), containsString("was created before v2.0.0.beta1 and wasn't upgraded"));
}
unloadIndex(indexName);
logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
@ -1,77 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.codec.postingformat;
|
||||
|
||||
import org.apache.lucene.codecs.FieldsConsumer;
|
||||
import org.apache.lucene.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.Fields;
|
||||
import org.apache.lucene.index.FilterLeafReader;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.elasticsearch.common.util.BloomFilter;
|
||||
import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat;
|
||||
import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer;
|
||||
import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
/** read-write version with blooms for testing */
|
||||
public class Elasticsearch090RWPostingsFormat extends Elasticsearch090PostingsFormat {
|
||||
@Override
|
||||
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
||||
final PostingsFormat delegate = getDefaultWrapped();
|
||||
final BloomFilteredFieldsConsumer fieldsConsumer = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT) {
|
||||
@Override
|
||||
public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
||||
return new BloomFilteredFieldsConsumer(delegate.fieldsConsumer(state), state,delegate);
|
||||
}
|
||||
}.fieldsConsumer(state);
|
||||
return new FieldsConsumer() {
|
||||
|
||||
@Override
|
||||
public void write(Fields fields) throws IOException {
|
||||
|
||||
Fields maskedFields = new FilterLeafReader.FilterFields(fields) {
|
||||
@Override
|
||||
public Iterator<String> iterator() {
|
||||
return StreamSupport.stream(this.in.spliterator(), false).filter(UID_FIELD_FILTER.negate()).iterator();
|
||||
}
|
||||
};
|
||||
fieldsConsumer.getDelegate().write(maskedFields);
|
||||
maskedFields = new FilterLeafReader.FilterFields(fields) {
|
||||
@Override
|
||||
public Iterator<String> iterator() {
|
||||
return Collections.singleton(UidFieldMapper.NAME).iterator();
|
||||
}
|
||||
};
|
||||
// only go through bloom for the UID field
|
||||
fieldsConsumer.write(maskedFields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
fieldsConsumer.close();
|
||||
}
|
||||
};
|
||||
}
|
||||
}
@ -1,44 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.codec.postingformat;

import com.carrotsearch.randomizedtesting.annotations.Listeners;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;

/** Runs elasticsearch postings format against lucene's standard postings format tests */
@Listeners({
        ReproduceInfoPrinter.class
})
@TimeoutSuite(millis = TimeUnits.HOUR)
@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
public class PostingsFormatTests extends BasePostingsFormatTestCase {

    @Override
    protected Codec getCodec() {
        return TestUtil.alwaysPostingsFormat(new Elasticsearch090RWPostingsFormat());
    }

}
@ -1686,10 +1686,6 @@ public class InternalEngineTests extends ESTestCase {
        Collections.shuffle(indexes, random());
        for (Path indexFile : indexes.subList(0, scaledRandomIntBetween(1, indexes.size() / 2))) {
            final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
            Version version = Version.fromString(indexName.replace("index-", ""));
            if (version.onOrAfter(Version.V_2_0_0_beta1)) {
                continue;
            }
            Path unzipDir = createTempDir();
            Path unzipDataDir = unzipDir.resolve("data");
            // decompress the index
@ -1709,11 +1705,9 @@ public class InternalEngineTests extends ESTestCase {
            assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
            assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog));
            Path[] tlogFiles = filterExtraFSFiles(FileSystemUtils.files(translog));
            assertEquals(Arrays.toString(tlogFiles), tlogFiles.length, 1);
            assertEquals(Arrays.toString(tlogFiles), tlogFiles.length, 2); // ckp & tlog
            Path tlogFile = tlogFiles[0].getFileName().toString().endsWith("tlog") ? tlogFiles[0] : tlogFiles[1];
            final long size = Files.size(tlogFiles[0]);

            final long generation = TranslogTests.parseLegacyTranslogFile(tlogFiles[0]);
            assertTrue(generation >= 1);
            logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size);
            Directory directory = newFSDirectory(src.resolve("0").resolve("index"));
            Store store = createStore(directory);
@ -1870,7 +1864,7 @@ public class InternalEngineTests extends ESTestCase {
        public final AtomicInteger recoveredOps = new AtomicInteger(0);

        public TranslogHandler(String indexName, ESLogger logger) {
            super(new ShardId("test", 0), null, null, null, null, logger);
            super(new ShardId("test", 0), null, logger);
            Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
            RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
            Index index = new Index(indexName);
@ -1892,13 +1886,6 @@ public class InternalEngineTests extends ESTestCase {
        protected void operationProcessed() {
            recoveredOps.incrementAndGet();
        }

        @Override
        public void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates) {
            if (operation.opType() != Translog.Operation.Type.DELETE_BY_QUERY) { // we don't support del by query in this test
                super.performRecoveryOperation(engine, operation, allowMappingUpdates);
            }
        }
    }

    public void testRecoverFromForeignTranslog() throws IOException {
@ -399,35 +399,6 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        assertEquals(durabilty, shard.getTranslogDurability());
    }

    public void testDeleteByQueryBWC() {
        Version version = VersionUtils.randomVersion(random());
        assertAcked(client().admin().indices().prepareCreate("test")
                .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, IndexMetaData.SETTING_VERSION_CREATED, version.id));
        ensureGreen("test");
        client().prepareIndex("test", "person").setSource("{ \"user\" : \"kimchy\" }").get();

        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
        IndexService test = indicesService.indexService("test");
        IndexShard shard = test.getShardOrNull(0);
        int numDocs = 1;
        shard.state = IndexShardState.RECOVERING;
        try {
            shard.recoveryState().getTranslog().totalOperations(1);
            shard.getEngine().config().getTranslogRecoveryPerformer().performRecoveryOperation(shard.getEngine(), new Translog.DeleteByQuery(new Engine.DeleteByQuery(null, new BytesArray("{\"term\" : { \"user\" : \"kimchy\" }}"), null, null, null, Engine.Operation.Origin.RECOVERY, 0, "person")), false);
            assertTrue(version.onOrBefore(Version.V_1_0_0_Beta2));
            numDocs = 0;
        } catch (ParsingException ex) {
            assertTrue(version.after(Version.V_1_0_0_Beta2));
        } finally {
            shard.state = IndexShardState.STARTED;
        }
        shard.getEngine().refresh("foo");

        try (Engine.Searcher searcher = shard.getEngine().acquireSearcher("foo")) {
            assertEquals(numDocs, searcher.reader().numDocs());
        }
    }

    public void testMinimumCompatVersion() {
        Version versionCreated = VersionUtils.randomVersion(random());
        assertAcked(client().admin().indices().prepareCreate("test")
@ -488,9 +488,6 @@ public class TranslogTests extends ESTestCase {
                        1 + randomInt(100000),
                        randomFrom(VersionType.values()));
                break;
            case DELETE_BY_QUERY:
                // deprecated
                continue;
            default:
                throw new ElasticsearchException("not supported op type");
        }
@ -695,9 +692,6 @@ public class TranslogTests extends ESTestCase {
                case DELETE:
                    op = new Translog.Delete(newUid("" + id));
                    break;
                case DELETE_BY_QUERY:
                    // deprecated
                    continue;
                default:
                    throw new ElasticsearchException("unknown type");
                }
@ -1139,147 +1133,4 @@ public class TranslogTests extends ESTestCase {
            assertNull(snapshot.next());
        }
    }

    public void testUpgradeOldTranslogFiles() throws IOException {
        List<Path> indexes = new ArrayList<>();
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) {
            for (Path path : stream) {
                indexes.add(path);
            }
        }
        TranslogConfig config = this.translog.getConfig();
        Translog.TranslogGeneration gen = translog.getGeneration();
        this.translog.close();
        try {
            Translog.upgradeLegacyTranslog(logger, translog.getConfig());
            fail("no generation set");
        } catch (IllegalArgumentException ex) {

        }
        translog.getConfig().setTranslogGeneration(gen);
        try {
            Translog.upgradeLegacyTranslog(logger, translog.getConfig());
            fail("already upgraded generation set");
        } catch (IllegalArgumentException ex) {

        }

        for (Path indexFile : indexes) {
            final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
            Version version = Version.fromString(indexName.replace("index-", ""));
            if (version.onOrAfter(Version.V_2_0_0_beta1)) {
                continue;
            }
            Path unzipDir = createTempDir();
            Path unzipDataDir = unzipDir.resolve("data");
            // decompress the index
            try (InputStream stream = Files.newInputStream(indexFile)) {
                TestUtil.unzip(stream, unzipDir);
            }
            // check it is unique
            assertTrue(Files.exists(unzipDataDir));
            Path[] list = FileSystemUtils.files(unzipDataDir);
            if (list.length != 1) {
                throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length);
            }
            // the bwc scripts packs the indices under this path
            Path src = list[0].resolve("nodes/0/indices/" + indexName);
            Path translog = list[0].resolve("nodes/0/indices/" + indexName).resolve("0").resolve("translog");

            assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
            assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog));
            Path[] tlogFiles = FileSystemUtils.files(translog);
            assertEquals(tlogFiles.length, 1);
            final long size = Files.size(tlogFiles[0]);

            final long generation = parseLegacyTranslogFile(tlogFiles[0]);
            assertTrue(generation >= 1);
            logger.info("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size);
            TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), translog, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool());
            upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation));
            Translog.upgradeLegacyTranslog(logger, upgradeConfig);
            try (Translog upgraded = new Translog(upgradeConfig)) {
                assertEquals(generation + 1, upgraded.getGeneration().translogFileGeneration);
                assertEquals(upgraded.getRecoveredReaders().size(), 1);
                final long headerSize;
                if (version.before(Version.V_1_4_0_Beta1)) {
                    assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReader.class);
                    headerSize = 0;
                } else {
                    assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReaderBase.class);
                    headerSize = CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC);
                }
                List<Translog.Operation> operations = new ArrayList<>();
                try (Translog.Snapshot snapshot = upgraded.newSnapshot()) {
                    Translog.Operation op = null;
                    while ((op = snapshot.next()) != null) {
                        operations.add(op);
                    }
                }
                if (size > headerSize) {
                    assertFalse(operations.toString(), operations.isEmpty());
                } else {
                    assertTrue(operations.toString(), operations.isEmpty());
                }
            }
        }
    }

    /**
     * This tests a set of files that has some of the operations flushed with a buffered translog such that tlogs are truncated.
     * 3 of the 6 files are created with ES 1.3 and the rest are created with ES 1.4, so that both the checksummed and the
     * super old version of the translog without a header are tested.
     */
    public void testOpenAndReadTruncatedLegacyTranslogs() throws IOException {
        Path zip = getDataPath("/org/elasticsearch/index/translog/legacy_translogs.zip");
        Path unzipDir = createTempDir();
        try (InputStream stream = Files.newInputStream(zip)) {
            TestUtil.unzip(stream, unzipDir);
        }
        TranslogConfig config = this.translog.getConfig();
        int count = 0;
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(unzipDir)) {

            for (Path legacyTranslog : stream) {
                logger.debug("upgrading {} ", legacyTranslog.getFileName());
                Path directory = legacyTranslog.resolveSibling("translog_" + count++);
                Files.createDirectories(directory);
                Files.copy(legacyTranslog, directory.resolve(legacyTranslog.getFileName()));
                TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), directory, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool());
                try {
                    Translog.upgradeLegacyTranslog(logger, upgradeConfig);
                    fail("no generation set");
                } catch (IllegalArgumentException ex) {
                    // expected
                }
                long generation = parseLegacyTranslogFile(legacyTranslog);
                upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation));
                Translog.upgradeLegacyTranslog(logger, upgradeConfig);
                try (Translog tlog = new Translog(upgradeConfig)) {
                    List<Translog.Operation> operations = new ArrayList<>();
                    try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
                        Translog.Operation op = null;
                        while ((op = snapshot.next()) != null) {
                            operations.add(op);
                        }
                    }
                    logger.debug("num ops recovered: {} for file {} ", operations.size(), legacyTranslog.getFileName());
                    assertFalse(operations.isEmpty());
                }
            }
        }
    }

    public static long parseLegacyTranslogFile(Path translogFile) {
        final String fileName = translogFile.getFileName().toString();
        final Matcher matcher = PARSE_LEGACY_ID_PATTERN.matcher(fileName);
        if (matcher.matches()) {
            try {
                return Long.parseLong(matcher.group(1));
            } catch (NumberFormatException e) {
                throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + fileName + "]", e);
            }
        }
        throw new IllegalArgumentException("can't parse id from file: " + fileName);
    }
}
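parseLegacyTranslogFile above pulls the numeric generation out of a pre-checkpoint translog file name via PARSE_LEGACY_ID_PATTERN, which is defined elsewhere in TranslogTests and not shown in this diff. A standalone sketch of the same parsing step, assuming a legacy name shape like translog-85 (optionally with a .recovering suffix); the pattern literal below is an assumption for illustration, not the test's actual constant:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class LegacyTranslogFileName {

    // Assumed shape of legacy (pre-checkpoint) translog file names, e.g. "translog-85"
    // or "translog-85.recovering"; the real PARSE_LEGACY_ID_PATTERN is not part of this diff.
    private static final Pattern LEGACY_PATTERN = Pattern.compile("translog-(\\d+)(\\.recovering)?");

    private LegacyTranslogFileName() {}

    public static long parseGeneration(String fileName) {
        Matcher matcher = LEGACY_PATTERN.matcher(fileName);
        if (matcher.matches()) {
            return Long.parseLong(matcher.group(1)); // group(1) holds the generation digits
        }
        throw new IllegalArgumentException("can't parse id from file: " + fileName);
    }
}

For example, parseGeneration("translog-85") returns 85, which the tests above then hand to Translog.upgradeLegacyTranslog through a TranslogGeneration before reopening the translog in its checkpoint-based form.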
Some files were not shown because too many files have changed in this diff.