Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-02-17 10:25:15 +00:00
Some Cleanup in o.e.i.engine

* Remove dead code and parameters
* Reduce visibility in some obvious spots
* Add missing `assert`s (not that important here, since the methods themselves will probably be dead-code eliminated, but still)
parent d2cd36bd9f
commit a5ca20a250
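The `assert` changes below (e.g. `assert assertNonPrimaryOrigin(index);`) rely on the usual Java idiom for assertion-only checks: the check method returns `boolean` so the whole call can sit behind the `assert` keyword and vanish when assertions are disabled. A minimal sketch of the idiom, with illustrative names that are not part of this patch:

// Minimal sketch of the assert-method idiom this commit applies; the class
// and method names here are illustrative, not taken from the patch.
final class AssertIdiomSketch {

    // Check methods return true so they can sit behind `assert`; the assert
    // inside carries the actual condition and a descriptive message.
    private static boolean assertNonNegativeSeqNo(long seqNo) {
        assert seqNo >= 0 : "expected a non-negative seq no but got: " + seqNo;
        return true;
    }

    static void handle(long seqNo) {
        // Calling the method bare (as some call sites did before this commit)
        // executes it even with assertions disabled. Wrapping the call in
        // `assert` means the JVM skips it entirely under -da (the production
        // default), so the check costs nothing outside of tests.
        assert assertNonNegativeSeqNo(seqNo);
    }
}

With assertions off the wrapped call is never evaluated, which is also why the commit message notes that the bare method bodies would likely be dead-code eliminated anyway.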
@@ -911,7 +911,7 @@ public abstract class Engine implements Closeable {
                 map.put(extension, length);
             }
 
-            if (useCompoundFile && directory != null) {
+            if (useCompoundFile) {
                 try {
                     directory.close();
                 } catch (IOException e) {
@@ -954,8 +954,7 @@ public abstract class Engine implements Closeable {
 
         // now, correlate or add the committed ones...
         if (lastCommittedSegmentInfos != null) {
-            SegmentInfos infos = lastCommittedSegmentInfos;
-            for (SegmentCommitInfo info : infos) {
+            for (SegmentCommitInfo info : lastCommittedSegmentInfos) {
                 Segment segment = segments.get(info.info.name);
                 if (segment == null) {
                     segment = new Segment(info.info.name);
@@ -1783,11 +1782,8 @@ public abstract class Engine implements Closeable {
 
             CommitId commitId = (CommitId) o;
 
-            if (!Arrays.equals(id, commitId.id)) {
-                return false;
-            }
-
-            return true;
+            return Arrays.equals(id, commitId.id);
         }
 
         @Override
@@ -563,7 +563,7 @@ public class InternalEngine extends Engine {
     /**
      * Reads the current stored history ID from the IW commit data.
      */
-    private String loadHistoryUUID(final IndexWriter writer) throws IOException {
+    private String loadHistoryUUID(final IndexWriter writer) {
         final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY);
         if (uuid == null) {
             throw new IllegalStateException("commit doesn't contain history uuid");
@@ -635,9 +635,8 @@ public class InternalEngine extends Engine {
             if (operation != null) {
                 // in the case of a already pruned translog generation we might get null here - yet very unlikely
                 final Translog.Index index = (Translog.Index) operation;
-                TranslogLeafReader reader = new TranslogLeafReader(index, engineConfig
-                    .getIndexSettings().getIndexVersionCreated());
-                return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close),
+                TranslogLeafReader reader = new TranslogLeafReader(index);
+                return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader),
                     new VersionsAndSeqNoResolver.DocIdAndVersion(0, index.version(), index.seqNo(), index.primaryTerm(),
                         reader, 0));
             }
@@ -756,7 +755,7 @@ public class InternalEngine extends Engine {
                 + index.getAutoGeneratedIdTimestamp();
         switch (index.origin()) {
             case PRIMARY:
-                assertPrimaryCanOptimizeAddDocument(index);
+                assert assertPrimaryCanOptimizeAddDocument(index);
                 return true;
             case PEER_RECOVERY:
             case REPLICA:
@@ -782,7 +781,7 @@ public class InternalEngine extends Engine {
 
     private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
         if (origin == Operation.Origin.PRIMARY) {
-            assertPrimaryIncomingSequenceNumber(origin, seqNo);
+            assert assertPrimaryIncomingSequenceNumber(origin, seqNo);
         } else {
             // sequence number should be set when operation origin is not primary
             assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin;
@@ -923,7 +922,7 @@ public class InternalEngine extends Engine {
     }
 
     protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException {
-        assertNonPrimaryOrigin(index);
+        assert assertNonPrimaryOrigin(index);
         final IndexingStrategy plan;
         final boolean appendOnlyRequest = canOptimizeAddDocument(index);
         if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) {
@@ -978,13 +977,13 @@ public class InternalEngine extends Engine {
         }
     }
 
-    protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOException {
+    private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException {
         assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin();
         final IndexingStrategy plan;
         // resolve an external operation into an internal one which is safe to replay
         if (canOptimizeAddDocument(index)) {
             if (mayHaveBeenIndexedBefore(index)) {
-                plan = IndexingStrategy.overrideExistingAsIfNotThere(1L);
+                plan = IndexingStrategy.overrideExistingAsIfNotThere();
                 versionMap.enforceSafeAccess();
             } else {
                 plan = IndexingStrategy.optimizedAppendOnly(1L);
@@ -1006,7 +1005,7 @@ public class InternalEngine extends Engine {
             if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) {
                 final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(),
                     index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
-                plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm());
+                plan = IndexingStrategy.skipDueToVersionConflict(e, true, currentVersion, getPrimaryTerm());
             } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && (
                 versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm()
             )) {
@@ -1164,9 +1163,9 @@ public class InternalEngine extends Engine {
                 true, false, versionForIndexing, null);
         }
 
-        static IndexingStrategy overrideExistingAsIfNotThere(long versionForIndexing) {
+        static IndexingStrategy overrideExistingAsIfNotThere() {
             return new IndexingStrategy(true, true, true,
-                false, versionForIndexing, null);
+                false, 1L, null);
         }
 
         public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long versionForIndexing) {
@@ -1285,7 +1284,7 @@ public class InternalEngine extends Engine {
     }
 
     protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException {
-        assertNonPrimaryOrigin(delete);
+        assert assertNonPrimaryOrigin(delete);
         maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr));
         assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" +
             "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]";
@@ -1305,7 +1304,7 @@ public class InternalEngine extends Engine {
         } else {
             final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
             if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
-                plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.version());
+                plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version());
             } else {
                 plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version());
             }
@@ -1318,7 +1317,7 @@ public class InternalEngine extends Engine {
         return true;
     }
 
-    protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException {
+    private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException {
         assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin();
         // resolve operation from external to internal
         final VersionValue versionValue = resolveDocVersion(delete, delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO);
@@ -1336,7 +1335,7 @@ public class InternalEngine extends Engine {
         if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) {
             final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(),
                 delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
-            plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted);
+            plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), true);
         } else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && (
             versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm()
         )) {
@@ -1428,8 +1427,8 @@ public class InternalEngine extends Engine {
             return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, null);
         }
 
-        static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, long versionOfDeletion) {
-            return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, versionOfDeletion, null);
+        static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) {
+            return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, null);
         }
     }
 
@@ -234,7 +234,7 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
     /**
     * Tracks bytes used by tombstones (deletes)
     */
-    final AtomicLong ramBytesUsedTombstones = new AtomicLong();
+    private final AtomicLong ramBytesUsedTombstones = new AtomicLong();
 
     @Override
     public void beforeRefresh() throws IOException {
@@ -188,8 +188,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
         int readerIndex = 0;
         CombinedDocValues combinedDocValues = null;
         LeafReaderContext leaf = null;
-        for (int i = 0; i < scoreDocs.length; i++) {
-            ScoreDoc scoreDoc = scoreDocs[i];
+        for (ScoreDoc scoreDoc : scoreDocs) {
             if (scoreDoc.doc >= docBase + maxDoc) {
                 do {
                     leaf = leaves.get(readerIndex++);
@@ -457,8 +457,8 @@ public class ReadOnlyEngine extends Engine {
 
     }
 
-    protected void processReaders(IndexReader reader, IndexReader previousReader) {
-        searcherFactory.processReaders(reader, previousReader);
+    protected void processReader(IndexReader reader) {
+        searcherFactory.processReaders(reader, null);
     }
 
     @Override
@@ -1,65 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.engine;
-
-import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.index.store.Store;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * RecoveryCounter keeps tracks of the number of ongoing recoveries for a
- * particular {@link Store}
- */
-public class RecoveryCounter implements Releasable {
-
-    private final Store store;
-
-    RecoveryCounter(Store store) {
-        this.store = store;
-    }
-
-    private final AtomicInteger onGoingRecoveries = new AtomicInteger();
-
-    void startRecovery() {
-        store.incRef();
-        onGoingRecoveries.incrementAndGet();
-    }
-
-    public int get() {
-        return onGoingRecoveries.get();
-    }
-
-    /**
-     * End the recovery counter by decrementing the store's ref and the ongoing recovery counter
-     * @return number of ongoing recoveries remaining
-     */
-    int endRecovery() {
-        store.decRef();
-        int left = onGoingRecoveries.decrementAndGet();
-        assert onGoingRecoveries.get() >= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get();
-        return left;
-    }
-
-    @Override
-    public void close() {
-        endRecovery();
-    }
-}
@@ -58,8 +58,7 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
         });
     }
 
-    // pkg private for testing
-    static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier<Query> retainSourceQuerySupplier)
+    private static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier<Query> retainSourceQuerySupplier)
         throws IOException {
         NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField);
         if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 public class Segment implements Streamable {
 
@@ -94,10 +95,6 @@ public class Segment implements Streamable {
         return new ByteSizeValue(sizeInBytes);
     }
 
-    public long getSizeInBytes() {
-        return this.sizeInBytes;
-    }
-
     public org.apache.lucene.util.Version getVersion() {
         return version;
     }
@@ -145,9 +142,8 @@ public class Segment implements Streamable {
 
         Segment segment = (Segment) o;
 
-        if (name != null ? !name.equals(segment.name) : segment.name != null) return false;
-
-        return true;
+        return Objects.equals(name, segment.name);
     }
 
     @Override
@@ -220,7 +216,7 @@ public class Segment implements Streamable {
         }
     }
 
-    Sort readSegmentSort(StreamInput in) throws IOException {
+    private Sort readSegmentSort(StreamInput in) throws IOException {
         int size = in.readVInt();
         if (size == 0) {
             return null;
@@ -271,7 +267,7 @@ public class Segment implements Streamable {
         return new Sort(fields);
     }
 
-    void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
+    private void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
         if (sort == null) {
             out.writeVInt(0);
             return;
@@ -311,14 +307,14 @@ public class Segment implements Streamable {
         }
     }
 
-    Accountable readRamTree(StreamInput in) throws IOException {
+    private Accountable readRamTree(StreamInput in) throws IOException {
         final String name = in.readString();
         final long bytes = in.readVLong();
         int numChildren = in.readVInt();
         if (numChildren == 0) {
             return Accountables.namedAccountable(name, bytes);
         }
-        List<Accountable> children = new ArrayList(numChildren);
+        List<Accountable> children = new ArrayList<>(numChildren);
         while (numChildren-- > 0) {
             children.add(readRamTree(in));
         }
@@ -326,7 +322,7 @@ public class Segment implements Streamable {
     }
 
     // the ram tree is written recursively since the depth is fairly low (5 or 6)
-    void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
+    private void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
         out.writeString(tree.toString());
         out.writeVLong(tree.ramBytesUsed());
         Collection<Accountable> children = tree.getChildResources();
@@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
-import java.util.Iterator;
 
 public class SegmentsStats implements Streamable, Writeable, ToXContentFragment {
 
@@ -54,7 +53,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
      * Ideally this should be in sync to what the current version of Lucene is using, but it's harmless to leave extensions out,
      * they'll just miss a proper description in the stats
      */
-    private static ImmutableOpenMap<String, String> fileDescriptions = ImmutableOpenMap.<String, String>builder()
+    private static final ImmutableOpenMap<String, String> FILE_DESCRIPTIONS = ImmutableOpenMap.<String, String>builder()
         .fPut("si", "Segment Info")
         .fPut("fnm", "Fields")
         .fPut("fdx", "Field Index")
@@ -150,8 +149,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
     public void addFileSizes(ImmutableOpenMap<String, Long> fileSizes) {
         ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder(this.fileSizes);
 
-        for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
-            ObjectObjectCursor<String, Long> entry = it.next();
+        for (ObjectObjectCursor<String, Long> entry : fileSizes) {
             if (map.containsKey(entry.key)) {
                 Long oldValue = map.get(entry.key);
                 map.put(entry.key, oldValue + entry.value);
@@ -206,7 +204,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.termsMemoryInBytes;
     }
 
-    public ByteSizeValue getTermsMemory() {
+    private ByteSizeValue getTermsMemory() {
         return new ByteSizeValue(termsMemoryInBytes);
     }
 
@@ -217,7 +215,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.storedFieldsMemoryInBytes;
     }
 
-    public ByteSizeValue getStoredFieldsMemory() {
+    private ByteSizeValue getStoredFieldsMemory() {
         return new ByteSizeValue(storedFieldsMemoryInBytes);
     }
 
@@ -228,7 +226,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.termVectorsMemoryInBytes;
     }
 
-    public ByteSizeValue getTermVectorsMemory() {
+    private ByteSizeValue getTermVectorsMemory() {
         return new ByteSizeValue(termVectorsMemoryInBytes);
     }
 
@@ -239,7 +237,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.normsMemoryInBytes;
     }
 
-    public ByteSizeValue getNormsMemory() {
+    private ByteSizeValue getNormsMemory() {
         return new ByteSizeValue(normsMemoryInBytes);
     }
 
@@ -250,7 +248,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.pointsMemoryInBytes;
     }
 
-    public ByteSizeValue getPointsMemory() {
+    private ByteSizeValue getPointsMemory() {
         return new ByteSizeValue(pointsMemoryInBytes);
     }
 
@@ -261,7 +259,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.docValuesMemoryInBytes;
     }
 
-    public ByteSizeValue getDocValuesMemory() {
+    private ByteSizeValue getDocValuesMemory() {
         return new ByteSizeValue(docValuesMemoryInBytes);
     }
 
@@ -326,11 +324,10 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory());
         builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp);
         builder.startObject(Fields.FILE_SIZES);
-        for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
-            ObjectObjectCursor<String, Long> entry = it.next();
+        for (ObjectObjectCursor<String, Long> entry : fileSizes) {
             builder.startObject(entry.key);
             builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value));
-            builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others"));
+            builder.field(Fields.DESCRIPTION, FILE_DESCRIPTIONS.getOrDefault(entry.key, "Others"));
             builder.endObject();
         }
         builder.endObject();
@@ -391,7 +388,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         out.writeVInt(fileSizes.size());
         for (ObjectObjectCursor<String, Long> entry : fileSizes) {
             out.writeString(entry.key);
-            out.writeLong(entry.value.longValue());
+            out.writeLong(entry.value);
         }
     }
 
@@ -20,16 +20,11 @@
 package org.elasticsearch.index.engine;
 
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.shard.ShardId;
 
 import java.io.IOException;
 
 public class SnapshotFailedEngineException extends EngineException {
 
-    public SnapshotFailedEngineException(ShardId shardId, Throwable cause) {
-        super(shardId, "Snapshot failed", cause);
-    }
-
     public SnapshotFailedEngineException(StreamInput in) throws IOException{
         super(in);
     }
@@ -35,7 +35,6 @@ import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -61,11 +60,9 @@ final class TranslogLeafReader extends LeafReader {
     private static final FieldInfo FAKE_ID_FIELD
         = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
         0, 0, 0, false);
-    private final Version indexVersionCreated;
 
-    TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) {
+    TranslogLeafReader(Translog.Index operation) {
         this.operation = operation;
-        this.indexVersionCreated = indexVersionCreated;
     }
     @Override
     public CacheHelper getCoreCacheHelper() {
@@ -161,14 +158,9 @@ final class TranslogLeafReader extends LeafReader {
             visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8));
         }
         if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) {
-            final byte[] id;
-            if (indexVersionCreated.onOrAfter(Version.V_6_0_0)) {
             BytesRef bytesRef = Uid.encodeId(operation.id());
-            id = new byte[bytesRef.length];
+            final byte[] id = new byte[bytesRef.length];
             System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
-            } else { // TODO this can go away in 7.0 after backport
-                id = operation.id().getBytes(StandardCharsets.UTF_8);
-            }
             visitor.stringField(FAKE_ID_FIELD, id);
         }
     }
@@ -42,11 +42,7 @@ public class VersionConflictEngineException extends EngineException {
     }
 
     public VersionConflictEngineException(ShardId shardId, String id, String explanation) {
-        this(shardId, null, id, explanation);
-    }
-
-    public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) {
-        this(shardId, "[{}]: version conflict, {}", cause, id, explanation);
+        this(shardId, "[{}]: version conflict, {}", null, id, explanation);
     }
 
     public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
@@ -169,7 +169,7 @@ public final class FrozenEngine extends ReadOnlyEngine {
                 listeners.beforeRefresh();
             }
             reader = DirectoryReader.open(engineConfig.getStore().directory());
-            processReaders(reader, null);
+            processReader(reader);
             reader = lastOpenedReader = wrapReader(reader, Function.identity());
             reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed);
             for (ReferenceManager.RefreshListener listeners : config().getInternalRefreshListener()) {