[Remove] Segment memory estimation and tracking (#2029)

Lucene 9 removed CodecReader#ramBytesUsed, and its file formats no longer
consume large amounts of memory. As a result, RAM estimation for segments is
no longer possible and is removed by this commit. Backwards compatibility is
retained for the transport layer.
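
In short, wire compatibility is handled by zero-padding: when talking to a
pre-2.0.0 node the removed estimates are still written (as zeros) and still
read (and discarded); on newer streams they are dropped entirely. An abridged
sketch of the pattern, following the SegmentsStats changes in the diffs below:

    // Write side: an older peer still expects the removed estimates on the wire.
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(count);
        if (out.getVersion().before(Version.V_2_0_0)) {
            out.writeLong(0L); // estimated segment memory, removed in Lucene 9
        }
        out.writeLong(indexWriterMemoryInBytes);
    }

    // Read side: consume and discard the estimate an older peer still sends.
    public SegmentsStats(StreamInput in) throws IOException {
        count = in.readVLong();
        if (in.getVersion().before(Version.V_2_0_0)) {
            in.readLong(); // estimated segment memory, ignored
        }
        indexWriterMemoryInBytes = in.readLong();
    }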

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize authored on 2022-02-04 13:47:48 -06:00, committed by GitHub
parent 1e5d98329e
commit fc0d3a368d
21 changed files with 80 additions and 591 deletions

File: IndexShardIT.java

@@ -36,8 +36,6 @@ import org.apache.lucene.store.LockObtainFailedException;
import org.opensearch.ExceptionsHelper;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
import org.opensearch.action.admin.cluster.node.stats.NodeStats;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;
@@ -57,7 +55,6 @@ import org.opensearch.common.CheckedFunction;
import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.Strings;
import org.opensearch.common.UUIDs;
import org.opensearch.common.breaker.CircuitBreaker;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.lucene.uid.Versions;
import org.opensearch.common.settings.Settings;
@@ -76,7 +73,6 @@ import org.opensearch.index.VersionType;
import org.opensearch.index.engine.CommitStats;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.engine.NoOpEngine;
import org.opensearch.index.engine.SegmentsStats;
import org.opensearch.index.flush.FlushStats;
import org.opensearch.index.mapper.SourceToParse;
import org.opensearch.index.seqno.RetentionLeaseSyncer;
@@ -86,10 +82,8 @@ import org.opensearch.index.translog.Translog;
import org.opensearch.index.translog.TranslogStats;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.breaker.CircuitBreakerService;
import org.opensearch.indices.breaker.CircuitBreakerStats;
import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.plugins.Plugin;
import org.opensearch.search.aggregations.AggregationBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.test.DummyShardLock;
import org.opensearch.test.OpenSearchSingleNodeTestCase;
@@ -122,7 +116,6 @@ import java.util.stream.Stream;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.BREAKER;
import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.opensearch.action.support.WriteRequest.RefreshPolicy.NONE;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
@@ -135,13 +128,11 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
public class IndexShardIT extends OpenSearchSingleNodeTestCase {
@@ -643,86 +634,6 @@ public class IndexShardIT extends OpenSearchSingleNodeTestCase {
}
}
/** Check that the accounting breaker correctly matches the segments API for memory usage */
private void checkAccountingBreaker() {
CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class);
CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING);
long usedMem = acctBreaker.getUsed();
assertThat(usedMem, greaterThan(0L));
NodesStatsResponse response = client().admin().cluster().prepareNodesStats().setIndices(true).addMetric(BREAKER.metricName()).get();
NodeStats stats = response.getNodes().get(0);
assertNotNull(stats);
SegmentsStats segmentsStats = stats.getIndices().getSegments();
CircuitBreakerStats breakerStats = stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING);
assertEquals(usedMem, segmentsStats.getMemoryInBytes());
assertEquals(usedMem, breakerStats.getEstimated());
}
public void testCircuitBreakerIncrementedByIndexShard() throws Exception {
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0))
.get();
// Generate a couple of segments
client().prepareIndex("test", "_doc", "1")
.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE)
.get();
// Use routing so 2 documents are guaranteed to be on the same shard
String routing = randomAlphaOfLength(5);
client().prepareIndex("test", "_doc", "2")
.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE)
.setRouting(routing)
.get();
client().prepareIndex("test", "_doc", "3")
.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE)
.setRouting(routing)
.get();
checkAccountingBreaker();
// Test that force merging causes the breaker to be correctly adjusted
logger.info("--> force merging to a single segment");
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(randomBoolean()).get();
client().admin().indices().prepareRefresh().get();
checkAccountingBreaker();
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("indices.breaker.total.limit", "1kb"))
.get();
// Test that we're now above the parent limit due to the segments
Exception e = expectThrows(
Exception.class,
() -> client().prepareSearch("test").addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get()
);
logger.info("--> got an expected exception", e);
assertThat(e.getCause(), notNullValue());
assertThat(e.getCause().getMessage(), containsString("[parent] Data too large, data for [<agg [foo_terms]>]"));
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(
Settings.builder().putNull("indices.breaker.total.limit").putNull("network.breaker.inflight_requests.overhead")
)
.get();
// Test that deleting the index causes the breaker to correctly be decremented
logger.info("--> deleting index");
client().admin().indices().prepareDelete("test").get();
// Accounting breaker should now be 0
CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class);
CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(acctBreaker.getUsed(), equalTo(0L));
}
public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));

File: IndexStatsIT.java

@@ -828,7 +828,6 @@ public class IndexStatsIT extends OpenSearchIntegTestCase {
assertThat(stats.getTotal().getSegments(), notNullValue());
assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards));
assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0L));
}
public void testAllFlags() throws Exception {

File: IndicesSegmentResponse.java

@@ -139,7 +139,7 @@ public class IndicesSegmentResponse extends BroadcastResponse {
builder.field(Fields.NUM_DOCS, segment.getNumDocs());
builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs());
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize());
builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(segment.getMemoryInBytes()));
builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getZeroMemory());
builder.field(Fields.COMMITTED, segment.isCommitted());
builder.field(Fields.SEARCH, segment.isSearch());
if (segment.getVersion() != null) {

File: CommonStats.java

@@ -493,7 +493,7 @@ public class CommonStats implements Writeable, ToXContentFragment {
/**
* Utility method which computes total memory by adding
* FieldData, PercolatorCache, Segments (memory, index writer, version map)
* FieldData, PercolatorCache, Segments (index writer, version map)
*/
public ByteSizeValue getTotalMemory() {
long size = 0;
@@ -504,8 +504,7 @@ public class CommonStats implements Writeable, ToXContentFragment {
size += this.getQueryCache().getMemorySizeInBytes();
}
if (this.getSegments() != null) {
size += this.getSegments().getMemoryInBytes() + this.getSegments().getIndexWriterMemoryInBytes() + this.getSegments()
.getVersionMapMemoryInBytes();
size += this.getSegments().getIndexWriterMemoryInBytes() + this.getSegments().getVersionMapMemoryInBytes();
}
return new ByteSizeValue(size);

File: Engine.java

@@ -51,7 +51,6 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.SetOnce;
import org.opensearch.ExceptionsHelper;
@@ -161,14 +160,6 @@ public abstract class Engine implements Closeable {
this.eventListener = engineConfig.getEventListener();
}
/** Returns 0 in the case where accountable is null, otherwise returns {@code ramBytesUsed()} */
protected static long guardedRamBytesUsed(Accountable a) {
if (a == null) {
return 0;
}
return a.ramBytesUsed();
}
public final EngineConfig config() {
return engineConfig;
}
@@ -875,14 +866,7 @@
}
protected void fillSegmentStats(SegmentReader segmentReader, boolean includeSegmentFileSizes, SegmentsStats stats) {
stats.add(1, segmentReader.ramBytesUsed());
stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader()));
stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader()));
stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader()));
stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader()));
stats.addPointsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPointsReader()));
stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader()));
stats.add(1);
if (includeSegmentFileSizes) {
// TODO: consider moving this to StoreStats
stats.addFileSizes(getSegmentFileSizes(segmentReader));
@@ -1048,7 +1032,6 @@
} catch (IOException e) {
logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segment.memoryInBytes = segmentReader.ramBytesUsed();
segment.segmentSort = info.info.getIndexSort();
if (verbose) {
segment.ramTree = Accountables.namedAccountable("root", segmentReader);

File: InternalEngine.java

@@ -701,10 +701,7 @@ public class InternalEngine extends Engine {
DirectoryReader.open(indexWriter),
shardId
);
internalReaderManager = new OpenSearchReaderManager(
directoryReader,
new RamAccountingRefreshListener(engineConfig.getCircuitBreakerService())
);
internalReaderManager = new OpenSearchReaderManager(directoryReader);
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener);
success = true;

File: OpenSearchReaderManager.java

@@ -33,7 +33,6 @@
package org.opensearch.index.engine;
import java.io.IOException;
import java.util.function.BiConsumer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.ReferenceManager;
@@ -52,23 +51,15 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
*/
@SuppressForbidden(reason = "reference counting is required here")
class OpenSearchReaderManager extends ReferenceManager<OpenSearchDirectoryReader> {
private final BiConsumer<OpenSearchDirectoryReader, OpenSearchDirectoryReader> refreshListener;
/**
* Creates and returns a new OpenSearchReaderManager from the given
* already-opened {@link OpenSearchDirectoryReader}, stealing
* the incoming reference.
*
* @param reader the directoryReader to use for future reopens
* @param refreshListener A consumer that is called every time a new reader is opened
*/
OpenSearchReaderManager(
OpenSearchDirectoryReader reader,
BiConsumer<OpenSearchDirectoryReader, OpenSearchDirectoryReader> refreshListener
) {
OpenSearchReaderManager(OpenSearchDirectoryReader reader) {
this.current = reader;
this.refreshListener = refreshListener;
refreshListener.accept(current, null);
}
@Override
@@ -79,9 +70,6 @@ class OpenSearchReaderManager extends ReferenceManager<OpenSearchDirectoryReader>
@Override
protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException {
final OpenSearchDirectoryReader reader = (OpenSearchDirectoryReader) DirectoryReader.openIfChanged(referenceToRefresh);
if (reader != null) {
refreshListener.accept(reader, referenceToRefresh);
}
return reader;
}

File: RamAccountingRefreshListener.java (deleted)

@@ -1,94 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.index.engine;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SegmentReader;
import org.opensearch.common.breaker.CircuitBreaker;
import org.opensearch.common.lucene.Lucene;
import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
import org.opensearch.indices.breaker.CircuitBreakerService;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.BiConsumer;
/**
* A refresh listener that tracks the amount of memory used by segments in the accounting circuit breaker.
*/
final class RamAccountingRefreshListener implements BiConsumer<OpenSearchDirectoryReader, OpenSearchDirectoryReader> {
private final CircuitBreakerService breakerService;
RamAccountingRefreshListener(CircuitBreakerService breakerService) {
this.breakerService = breakerService;
}
@Override
public void accept(OpenSearchDirectoryReader reader, OpenSearchDirectoryReader previousReader) {
final CircuitBreaker breaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING);
// Construct a list of the previous segment readers, we only want to track memory used
// by new readers, so these will be exempted from the circuit breaking accounting.
//
// The Core CacheKey is used as the key for the set so that deletions still keep the correct
// accounting, as using the Reader or Reader's CacheKey causes incorrect accounting.
final Set<IndexReader.CacheKey> prevReaders;
if (previousReader == null) {
prevReaders = Collections.emptySet();
} else {
final List<LeafReaderContext> previousReaderLeaves = previousReader.leaves();
prevReaders = new HashSet<>(previousReaderLeaves.size());
for (LeafReaderContext lrc : previousReaderLeaves) {
prevReaders.add(Lucene.segmentReader(lrc.reader()).getCoreCacheHelper().getKey());
}
}
for (LeafReaderContext lrc : reader.leaves()) {
final SegmentReader segmentReader = Lucene.segmentReader(lrc.reader());
// don't add the segment's memory unless it is not referenced by the previous reader
// (only new segments)
if (prevReaders.contains(segmentReader.getCoreCacheHelper().getKey()) == false) {
final long ramBytesUsed = segmentReader.ramBytesUsed();
// add the segment memory to the breaker (non-breaking)
breaker.addWithoutBreaking(ramBytesUsed);
// and register a listener for when the segment is closed to decrement the
// breaker accounting
segmentReader.getCoreCacheHelper().addClosedListener(k -> breaker.addWithoutBreaking(-ramBytesUsed));
}
}
}
}

File: ReadOnlyEngine.java

@@ -85,7 +85,6 @@ public class ReadOnlyEngine extends Engine {
private final OpenSearchReaderManager readerManager;
private final IndexCommit indexCommit;
private final Lock indexWriterLock;
private final RamAccountingRefreshListener refreshListener;
private final SafeCommitInfo safeCommitInfo;
private final CompletionStatsCache completionStatsCache;
private final boolean requireCompleteHistory;
@@ -114,7 +113,6 @@ public class ReadOnlyEngine extends Engine {
boolean requireCompleteHistory
) {
super(config);
this.refreshListener = new RamAccountingRefreshListener(engineConfig.getCircuitBreakerService());
this.requireCompleteHistory = requireCompleteHistory;
try {
Store store = config.getStore();
@@ -135,14 +133,13 @@
this.seqNoStats = seqNoStats;
this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory);
reader = wrapReader(open(indexCommit), readerWrapperFunction);
readerManager = new OpenSearchReaderManager(reader, refreshListener);
readerManager = new OpenSearchReaderManager(reader);
assert translogStats != null || obtainLock : "mutiple translogs instances should not be opened at the same time";
this.translogStats = translogStats != null ? translogStats : translogStats(config, lastCommittedSegmentInfos);
this.indexWriterLock = indexWriterLock;
this.safeCommitInfo = new SafeCommitInfo(seqNoStats.getLocalCheckpoint(), lastCommittedSegmentInfos.totalMaxDoc());
completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats"));
// no need to register a refresh listener to invalidate completionStatsCache since this engine is readonly
success = true;
} finally {
@@ -511,10 +508,6 @@
}
protected void processReader(OpenSearchDirectoryReader reader) {
refreshListener.accept(reader, null);
}
@Override
public boolean refreshNeeded() {
return false;

File: Segment.java

@@ -40,6 +40,7 @@ import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedNumericSelector;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.opensearch.Version;
import org.opensearch.common.Nullable;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
@@ -66,11 +67,12 @@ public class Segment implements Writeable {
public org.apache.lucene.util.Version version = null;
public Boolean compound = null;
public String mergeId;
public long memoryInBytes;
public Sort segmentSort;
public Accountable ramTree = null;
public Map<String, String> attributes;
private static final ByteSizeValue ZERO_BYTE_SIZE_VALUE = new ByteSizeValue(0L);
public Segment(StreamInput in) throws IOException {
name = in.readString();
generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
@@ -82,7 +84,11 @@
version = Lucene.parseVersionLenient(in.readOptionalString(), null);
compound = in.readOptionalBoolean();
mergeId = in.readOptionalString();
memoryInBytes = in.readLong();
// the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
// retain for bwc only (todo: remove in OpenSearch 3)
if (in.getVersion().before(Version.V_2_0_0)) {
in.readLong(); // estimated memory
}
if (in.readBoolean()) {
// verbose mode
ramTree = readRamTree(in);
@@ -145,10 +151,13 @@
}
/**
* Estimation of the memory usage used by a segment.
* Estimation of the memory usage was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
* retain for bwc only (todo: remove in OpenSearch 3).
* @deprecated
*/
public long getMemoryInBytes() {
return this.memoryInBytes;
@Deprecated
public ByteSizeValue getZeroMemory() {
return ZERO_BYTE_SIZE_VALUE;
}
/**
@@ -193,7 +202,11 @@
out.writeOptionalString(version.toString());
out.writeOptionalBoolean(compound);
out.writeOptionalString(mergeId);
out.writeLong(memoryInBytes);
// the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
// retain for bwc only (todo: remove in OpenSearch 3)
if (out.getVersion().before(Version.V_2_0_0)) {
out.writeLong(0L);
}
boolean verbose = ramTree != null;
out.writeBoolean(verbose);
@@ -350,8 +363,6 @@
+ ", mergeId='"
+ mergeId
+ '\''
+ ", memoryInBytes="
+ memoryInBytes
+ (segmentSort != null ? ", sort=" + segmentSort : "")
+ ", attributes="
+ attributes

File: SegmentsStats.java

@@ -33,6 +33,7 @@
package org.opensearch.index.engine;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.opensearch.Version;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
@@ -46,19 +47,14 @@ import java.io.IOException;
public class SegmentsStats implements Writeable, ToXContentFragment {
private long count;
private long memoryInBytes;
private long termsMemoryInBytes;
private long storedFieldsMemoryInBytes;
private long termVectorsMemoryInBytes;
private long normsMemoryInBytes;
private long pointsMemoryInBytes;
private long docValuesMemoryInBytes;
private long indexWriterMemoryInBytes;
private long versionMapMemoryInBytes;
private long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
private long bitsetMemoryInBytes;
private ImmutableOpenMap<String, Long> fileSizes = ImmutableOpenMap.of();
private static final ByteSizeValue ZERO_BYTE_SIZE_VALUE = new ByteSizeValue(0L);
/*
* A map to provide a best-effort approach describing Lucene index files.
*
@@ -91,13 +87,17 @@
public SegmentsStats(StreamInput in) throws IOException {
count = in.readVLong();
memoryInBytes = in.readLong();
termsMemoryInBytes = in.readLong();
storedFieldsMemoryInBytes = in.readLong();
termVectorsMemoryInBytes = in.readLong();
normsMemoryInBytes = in.readLong();
pointsMemoryInBytes = in.readLong();
docValuesMemoryInBytes = in.readLong();
// the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
// retain for bwc only (todo: remove in OpenSearch 3)
if (in.getVersion().before(Version.V_2_0_0)) {
in.readLong(); // estimated segment memory
in.readLong(); // estimated terms memory
in.readLong(); // estimated stored fields memory
in.readLong(); // estimated term vector memory
in.readLong(); // estimated norms memory
in.readLong(); // estimated points memory
in.readLong(); // estimated doc values memory
}
indexWriterMemoryInBytes = in.readLong();
versionMapMemoryInBytes = in.readLong();
bitsetMemoryInBytes = in.readLong();
@@ -113,33 +113,8 @@
fileSizes = map.build();
}
public void add(long count, long memoryInBytes) {
public void add(long count) {
this.count += count;
this.memoryInBytes += memoryInBytes;
}
public void addTermsMemoryInBytes(long termsMemoryInBytes) {
this.termsMemoryInBytes += termsMemoryInBytes;
}
public void addStoredFieldsMemoryInBytes(long storedFieldsMemoryInBytes) {
this.storedFieldsMemoryInBytes += storedFieldsMemoryInBytes;
}
public void addTermVectorsMemoryInBytes(long termVectorsMemoryInBytes) {
this.termVectorsMemoryInBytes += termVectorsMemoryInBytes;
}
public void addNormsMemoryInBytes(long normsMemoryInBytes) {
this.normsMemoryInBytes += normsMemoryInBytes;
}
public void addPointsMemoryInBytes(long pointsMemoryInBytes) {
this.pointsMemoryInBytes += pointsMemoryInBytes;
}
public void addDocValuesMemoryInBytes(long docValuesMemoryInBytes) {
this.docValuesMemoryInBytes += docValuesMemoryInBytes;
}
public void addIndexWriterMemoryInBytes(long indexWriterMemoryInBytes) {
@@ -178,13 +153,7 @@
return;
}
updateMaxUnsafeAutoIdTimestamp(mergeStats.maxUnsafeAutoIdTimestamp);
add(mergeStats.count, mergeStats.memoryInBytes);
addTermsMemoryInBytes(mergeStats.termsMemoryInBytes);
addStoredFieldsMemoryInBytes(mergeStats.storedFieldsMemoryInBytes);
addTermVectorsMemoryInBytes(mergeStats.termVectorsMemoryInBytes);
addNormsMemoryInBytes(mergeStats.normsMemoryInBytes);
addPointsMemoryInBytes(mergeStats.pointsMemoryInBytes);
addDocValuesMemoryInBytes(mergeStats.docValuesMemoryInBytes);
add(mergeStats.count);
addIndexWriterMemoryInBytes(mergeStats.indexWriterMemoryInBytes);
addVersionMapMemoryInBytes(mergeStats.versionMapMemoryInBytes);
addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes);
@@ -198,83 +167,6 @@
return this.count;
}
/**
* Estimation of the memory usage used by a segment.
*/
public long getMemoryInBytes() {
return this.memoryInBytes;
}
public ByteSizeValue getMemory() {
return new ByteSizeValue(memoryInBytes);
}
/**
* Estimation of the terms dictionary memory usage by a segment.
*/
public long getTermsMemoryInBytes() {
return this.termsMemoryInBytes;
}
private ByteSizeValue getTermsMemory() {
return new ByteSizeValue(termsMemoryInBytes);
}
/**
* Estimation of the stored fields memory usage by a segment.
*/
public long getStoredFieldsMemoryInBytes() {
return this.storedFieldsMemoryInBytes;
}
private ByteSizeValue getStoredFieldsMemory() {
return new ByteSizeValue(storedFieldsMemoryInBytes);
}
/**
* Estimation of the term vectors memory usage by a segment.
*/
public long getTermVectorsMemoryInBytes() {
return this.termVectorsMemoryInBytes;
}
private ByteSizeValue getTermVectorsMemory() {
return new ByteSizeValue(termVectorsMemoryInBytes);
}
/**
* Estimation of the norms memory usage by a segment.
*/
public long getNormsMemoryInBytes() {
return this.normsMemoryInBytes;
}
private ByteSizeValue getNormsMemory() {
return new ByteSizeValue(normsMemoryInBytes);
}
/**
* Estimation of the points memory usage by a segment.
*/
public long getPointsMemoryInBytes() {
return this.pointsMemoryInBytes;
}
private ByteSizeValue getPointsMemory() {
return new ByteSizeValue(pointsMemoryInBytes);
}
/**
* Estimation of the doc values memory usage by a segment.
*/
public long getDocValuesMemoryInBytes() {
return this.docValuesMemoryInBytes;
}
private ByteSizeValue getDocValuesMemory() {
return new ByteSizeValue(docValuesMemoryInBytes);
}
/**
* Estimation of the memory usage by index writer
*/
@@ -324,13 +216,13 @@
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.SEGMENTS);
builder.field(Fields.COUNT, count);
builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, getMemory());
builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, getTermsMemory());
builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, getStoredFieldsMemory());
builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, getTermVectorsMemory());
builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, getNormsMemory());
builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, getPointsMemory());
builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, getDocValuesMemory());
builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, ZERO_BYTE_SIZE_VALUE);
builder.humanReadableField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, getIndexWriterMemory());
builder.humanReadableField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, getVersionMapMemory());
builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory());
@@ -380,13 +272,17 @@
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(count);
out.writeLong(memoryInBytes);
out.writeLong(termsMemoryInBytes);
out.writeLong(storedFieldsMemoryInBytes);
out.writeLong(termVectorsMemoryInBytes);
out.writeLong(normsMemoryInBytes);
out.writeLong(pointsMemoryInBytes);
out.writeLong(docValuesMemoryInBytes);
if (out.getVersion().before(Version.V_2_0_0)) {
// the following was removed in Lucene 9 (https://issues.apache.org/jira/browse/LUCENE-9387)
// retain the following for bwc only (todo: remove in OpenSearch 3)
out.writeLong(0L); // estimated memory
out.writeLong(0L); // estimated terms memory
out.writeLong(0L); // estimated stored fields memory
out.writeLong(0L); // estimated term vector memory
out.writeLong(0L); // estimated norms memory
out.writeLong(0L); // estimated points memory
out.writeLong(0L); // estimated doc values memory
}
out.writeLong(indexWriterMemoryInBytes);
out.writeLong(versionMapMemoryInBytes);
out.writeLong(bitsetMemoryInBytes);
@@ -402,4 +298,14 @@
public void clearFileSizes() {
fileSizes = ImmutableOpenMap.of();
}
/**
* Used only for deprecating memory tracking in REST interface
* todo remove in OpenSearch 3.0
* @deprecated
*/
@Deprecated
public ByteSizeValue getZeroMemory() {
return ZERO_BYTE_SIZE_VALUE;
}
}
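
For REST consumers the response shape is unchanged; the removed estimates are
simply pinned to zero via getZeroMemory() and ZERO_BYTE_SIZE_VALUE. A
hypothetical, abbreviated segments block from a nodes stats response (the
non-zero values here are made up for illustration):

    "segments" : {
      "count" : 4,
      "memory_in_bytes" : 0,
      "terms_memory_in_bytes" : 0,
      "stored_fields_memory_in_bytes" : 0,
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory_in_bytes" : 0,
      "points_memory_in_bytes" : 0,
      "doc_values_memory_in_bytes" : 0,
      "index_writer_memory_in_bytes" : 118292,
      "version_map_memory_in_bytes" : 1356
    }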

File: RestIndicesAction.java

@@ -850,8 +850,8 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount());
table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getCount());
table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getMemory());
table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getMemory());
table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getZeroMemory());
table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getZeroMemory());
table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getIndexWriterMemory());
table.addCell(primaryStats.getSegments() == null ? null : primaryStats.getSegments().getIndexWriterMemory());

File: RestNodesAction.java

@@ -501,7 +501,7 @@ public class RestNodesAction extends AbstractCatAction {
SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments();
table.addCell(segmentsStats == null ? null : segmentsStats.getCount());
table.addCell(segmentsStats == null ? null : segmentsStats.getMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getZeroMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory());

File: RestSegmentsAction.java

@@ -155,7 +155,7 @@ public class RestSegmentsAction extends AbstractCatAction {
table.addCell(segment.getNumDocs());
table.addCell(segment.getDeletedDocs());
table.addCell(segment.getSize());
table.addCell(segment.getMemoryInBytes());
table.addCell(0L);
table.addCell(segment.isCommitted());
table.addCell(segment.isSearch());
table.addCell(segment.getVersion());

File: RestShardsAction.java

@@ -381,7 +381,7 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount()));
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount));
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getMemory));
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory));
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getIndexWriterMemory));
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getVersionMapMemory));
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getBitsetMemory));

File: IndicesStatsTests.java

@@ -60,12 +60,7 @@ public class IndicesStatsTests extends OpenSearchSingleNodeTestCase {
createIndex("test");
IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
SegmentsStats stats = rsp.getTotal().getSegments();
assertEquals(0, stats.getTermsMemoryInBytes());
assertEquals(0, stats.getStoredFieldsMemoryInBytes());
assertEquals(0, stats.getTermVectorsMemoryInBytes());
assertEquals(0, stats.getNormsMemoryInBytes());
assertEquals(0, stats.getPointsMemoryInBytes());
assertEquals(0, stats.getDocValuesMemoryInBytes());
assertEquals(0, stats.getCount());
}
public void testSegmentStats() throws Exception {
@@ -102,16 +97,8 @@
IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
SegmentsStats stats = rsp.getIndex("test").getTotal().getSegments();
assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L));
if ((storeType == IndexModule.Type.MMAPFS) || (storeType == IndexModule.Type.HYBRIDFS)) {
assertEquals(0, stats.getPointsMemoryInBytes()); // bkd tree is stored off-heap
} else {
assertThat(stats.getPointsMemoryInBytes(), greaterThan(0L)); // bkd tree is stored on heap
}
// should be more than one segment since data was indexed
assertThat(stats.getCount(), greaterThan(0L));
// now check multiple segments stats are merged together
client().prepareIndex("test", "doc", "2").setSource("foo", "bar", "bar", "baz", "baz", 43).get();
@@ -119,16 +106,8 @@
rsp = client().admin().indices().prepareStats("test").get();
SegmentsStats stats2 = rsp.getIndex("test").getTotal().getSegments();
assertThat(stats2.getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
assertThat(stats2.getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
assertThat(stats2.getTermVectorsMemoryInBytes(), greaterThan(stats.getTermVectorsMemoryInBytes()));
assertThat(stats2.getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
assertThat(stats2.getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
if ((storeType == IndexModule.Type.MMAPFS) || (storeType == IndexModule.Type.HYBRIDFS)) {
assertEquals(0, stats2.getPointsMemoryInBytes()); // bkd tree is stored off-heap
} else {
assertThat(stats2.getPointsMemoryInBytes(), greaterThan(stats.getPointsMemoryInBytes())); // bkd tree is stored on heap
}
// stats2 should exceed stats since multiple segments stats were merged
assertThat(stats2.getCount(), greaterThan(stats.getCount()));
}
public void testCommitStats() throws Exception {

File: InternalEngineTests.java

@@ -98,7 +98,6 @@ import org.opensearch.common.Randomness;
import org.opensearch.common.Strings;
import org.opensearch.common.TriFunction;
import org.opensearch.common.UUIDs;
import org.opensearch.common.breaker.CircuitBreaker;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.collect.Tuple;
@@ -6712,7 +6711,6 @@ public class InternalEngineTests extends EngineTestCase {
indexer.join();
refresher.join();
}
assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L));
}
public void testPruneAwayDeletedButRetainedIds() throws Exception {

File: NoOpEngineTests.java

@@ -182,7 +182,6 @@ public class NoOpEngineTests extends EngineTestCase {
);
assertEquals(0, noOpEngine.segmentsStats(includeFileSize, false).getFileSizes().size());
assertEquals(0, noOpEngine.segmentsStats(includeFileSize, false).getMemoryInBytes());
} catch (AssertionError e) {
logger.error(config.getMergePolicy());
throw e;

File: SegmentTests.java

@@ -95,7 +95,6 @@ public class SegmentTests extends OpenSearchTestCase {
segment.version = Version.LUCENE_7_0_0;
segment.compound = randomBoolean();
segment.mergeId = randomAlphaOfLengthBetween(1, 10);
segment.memoryInBytes = randomNonNegativeLong();
segment.segmentSort = randomIndexSort();
if (randomBoolean()) {
segment.attributes = Collections.singletonMap("foo", "bar");
@@ -123,7 +122,6 @@
&& Objects.equals(seg1.version, seg2.version)
&& Objects.equals(seg1.compound, seg2.compound)
&& seg1.sizeInBytes == seg2.sizeInBytes
&& seg1.memoryInBytes == seg2.memoryInBytes
&& seg1.getGeneration() == seg2.getGeneration()
&& seg1.getName().equals(seg2.getName())
&& seg1.getMergeId().equals(seg2.getMergeId())

File: IndexShardTests.java

@@ -70,7 +70,6 @@ import org.opensearch.common.CheckedFunction;
import org.opensearch.common.Randomness;
import org.opensearch.common.Strings;
import org.opensearch.common.UUIDs;
import org.opensearch.common.breaker.CircuitBreaker;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.io.stream.BytesStreamOutput;
@@ -88,7 +87,6 @@ import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.env.NodeEnvironment;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.VersionType;
@@ -103,7 +101,6 @@ import org.opensearch.index.engine.EngineTestCase;
import org.opensearch.index.engine.InternalEngine;
import org.opensearch.index.engine.InternalEngineFactory;
import org.opensearch.index.engine.ReadOnlyEngine;
import org.opensearch.index.engine.SegmentsStats;
import org.opensearch.index.fielddata.FieldDataStats;
import org.opensearch.index.fielddata.IndexFieldData;
import org.opensearch.index.fielddata.IndexFieldDataCache;
@@ -201,7 +198,6 @@ import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.in;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
@@ -3916,172 +3912,6 @@ public class IndexShardTests extends IndexShardTestCase {
closeShards(primary);
}
public void testSegmentMemoryTrackedInBreaker() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.build();
IndexMetadata metadata = IndexMetadata.builder("test")
.putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1)
.build();
IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null);
recoverShardFromStore(primary);
indexDoc(primary, "_doc", "0", "{\"foo\" : \"foo\"}");
primary.refresh("forced refresh");
SegmentsStats ss = primary.segmentStats(randomBoolean(), randomBoolean());
CircuitBreaker breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(ss.getMemoryInBytes(), equalTo(breaker.getUsed()));
final long preRefreshBytes = ss.getMemoryInBytes();
indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}");
indexDoc(primary, "_doc", "2", "{\"foo\" : \"baz\"}");
indexDoc(primary, "_doc", "3", "{\"foo\" : \"eggplant\"}");
ss = primary.segmentStats(randomBoolean(), randomBoolean());
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(preRefreshBytes, equalTo(breaker.getUsed()));
primary.refresh("refresh");
ss = primary.segmentStats(randomBoolean(), randomBoolean());
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(breaker.getUsed(), equalTo(ss.getMemoryInBytes()));
assertThat(breaker.getUsed(), greaterThan(preRefreshBytes));
indexDoc(primary, "_doc", "4", "{\"foo\": \"potato\"}");
indexDoc(primary, "_doc", "5", "{\"foo\": \"potato\"}");
// Forces a refresh with the INTERNAL scope
((InternalEngine) primary.getEngine()).writeIndexingBuffer();
ss = primary.segmentStats(randomBoolean(), randomBoolean());
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(breaker.getUsed(), equalTo(ss.getMemoryInBytes()));
assertThat(breaker.getUsed(), greaterThan(preRefreshBytes));
final long postRefreshBytes = ss.getMemoryInBytes();
// Deleting a doc causes its memory to be freed from the breaker
deleteDoc(primary, "_doc", "0");
// Here we are testing that a fully deleted segment should be dropped and its memory usage is freed.
// In order to instruct the merge policy not to keep a fully deleted segment,
// we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything.
primary.updateGlobalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.getLastSyncedGlobalCheckpoint());
primary.syncRetentionLeases();
primary.sync();
flushShard(primary);
primary.refresh("force refresh");
ss = primary.segmentStats(randomBoolean(), randomBoolean());
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(breaker.getUsed(), lessThan(postRefreshBytes));
closeShards(primary);
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(breaker.getUsed(), equalTo(0L));
}
public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.build();
IndexMetadata metadata = IndexMetadata.builder("test")
.putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1)
.build();
IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null);
recoverShardFromStore(primary);
int threadCount = randomIntBetween(2, 4);
List<Thread> threads = new ArrayList<>(threadCount);
int iterations = randomIntBetween(10, 20);
List<Engine.Searcher> searchers = Collections.synchronizedList(new ArrayList<>());
logger.info("--> running with {} threads and {} iterations each", threadCount, iterations);
for (int threadId = 0; threadId < threadCount; threadId++) {
final String threadName = "thread-" + threadId;
Runnable r = () -> {
for (int i = 0; i < iterations; i++) {
try {
if (randomBoolean()) {
String id = "id-" + threadName + "-" + i;
logger.debug("--> {} indexing {}", threadName, id);
indexDoc(primary, "_doc", id, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
}
if (randomBoolean() && i > 10) {
String id = "id-" + threadName + "-" + randomIntBetween(0, i - 1);
logger.debug("--> {}, deleting {}", threadName, id);
deleteDoc(primary, "_doc", id);
}
if (randomBoolean()) {
logger.debug("--> {} refreshing", threadName);
primary.refresh("forced refresh");
}
if (randomBoolean()) {
String searcherName = "searcher-" + threadName + "-" + i;
logger.debug("--> {} acquiring new searcher {}", threadName, searcherName);
// Acquire a new searcher, adding it to the list
searchers.add(primary.acquireSearcher(searcherName));
}
if (randomBoolean() && searchers.size() > 1) {
// Close one of the readers at random
synchronized (searchers) {
// re-check because it could have decremented after the check
if (searchers.size() > 1) {
Engine.Searcher searcher = searchers.remove(0);
logger.debug("--> {} closing searcher {}", threadName, searcher.source());
IOUtils.close(searcher);
}
}
}
} catch (Exception e) {
logger.warn("--> got exception: ", e);
fail("got an exception we didn't expect");
}
}
};
threads.add(new Thread(r, threadName));
}
threads.stream().forEach(t -> t.start());
for (Thread t : threads) {
t.join();
}
// We need to wait for all ongoing merges to complete. The reason is that during a merge the
// IndexWriter holds the core cache key open and causes the memory to be registered in the breaker
primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(true));
// Close remaining searchers
IOUtils.close(searchers);
primary.refresh("test");
SegmentsStats ss = primary.segmentStats(randomBoolean(), randomBoolean());
CircuitBreaker breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
long segmentMem = ss.getMemoryInBytes();
long breakerMem = breaker.getUsed();
logger.info("--> comparing segmentMem: {} - breaker: {} => {}", segmentMem, breakerMem, segmentMem == breakerMem);
assertThat(segmentMem, equalTo(breakerMem));
// Close shard
closeShards(primary);
// Check that the breaker was successfully reset to 0, meaning that all the accounting was correctly applied
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(breaker.getUsed(), equalTo(0L));
}
public void testOnCloseStats() throws IOException {
final IndexShard indexShard = newStartedShard(true);

File: ExternalTestCluster.java

@@ -222,14 +222,6 @@ public final class ExternalTestCluster extends TestCluster {
stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(),
equalTo(0L)
);
assertThat(
"Accounting breaker not reset to "
+ stats.getIndices().getSegments().getMemoryInBytes()
+ " on node: "
+ stats.getNode(),
stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING).getEstimated(),
equalTo(stats.getIndices().getSegments().getMemoryInBytes())
);
// ExternalTestCluster does not check the request breaker,
// because checking it requires a network request, which in
// turn increments the breaker, making it non-0