[Javadocs] add to o.o.index and indices (#3209)

Adds javadocs to org.opensearch.index and indices packages.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize 2022-05-05 17:06:48 -05:00 committed by GitHub
parent 2fe2e37a44
commit 7b75fb425f
559 changed files with 1896 additions and 28 deletions

View File

@ -36,6 +36,11 @@ import org.apache.logging.log4j.Logger;
import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.common.logging.Loggers;
/**
* A base class for new index components
*
* @opensearch.internal
*/
public abstract class AbstractIndexComponent implements IndexComponent {
protected final Logger logger;

View File

@ -51,6 +51,8 @@ import java.util.List;
/**
* A composite {@link IndexEventListener} that forwards all callbacks to an immutable list of IndexEventListener
*
* @opensearch.internal
*/
final class CompositeIndexEventListener implements IndexEventListener {

View File

@ -47,6 +47,8 @@ import java.util.Objects;
/**
* A value class representing the basic required properties of an OpenSearch index.
*
* @opensearch.internal
*/
public class Index implements Writeable, ToXContentObject {

View File

@ -32,6 +32,11 @@
package org.opensearch.index;
/**
* Actions that can be executed on an Index Component
*
* @opensearch.internal
*/
public interface IndexComponent {
Index index();

View File

@ -109,6 +109,8 @@ import java.util.function.Function;
* <li>Settings update listener - Custom settings update listener can be registered via
* {@link #addSettingsUpdateConsumer(Setting, Consumer)}</li>
* </ul>
*
* @opensearch.internal
*/
public final class IndexModule {
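
For orientation (not part of this diff), the settings-update hook called out above is typically wired from a plugin's onIndexModule callback. A minimal sketch follows; the setting key and plugin class are hypothetical, and only addSettingsUpdateConsumer(Setting, Consumer) is taken from the javadoc:

import org.opensearch.common.settings.Setting;
import org.opensearch.index.IndexModule;
import org.opensearch.plugins.Plugin;

public class ExampleIndexSettingPlugin extends Plugin {

    // Hypothetical dynamic, index-scoped setting; it would also need to be
    // returned from Plugin#getSettings() to be registered.
    static final Setting<Integer> EXAMPLE_SETTING = Setting.intSetting(
        "index.example_plugin.value", 1,
        Setting.Property.IndexScope, Setting.Property.Dynamic);

    @Override
    public void onIndexModule(IndexModule indexModule) {
        // The consumer is invoked on every update of the dynamic setting for this index.
        indexModule.addSettingsUpdateConsumer(EXAMPLE_SETTING, newValue -> {
            // react to the updated value here
        });
    }
}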

View File

@ -36,6 +36,11 @@ import org.opensearch.common.io.stream.StreamInput;
import java.io.IOException;
/**
* Exception thrown if an index is not found
*
* @opensearch.internal
*/
public final class IndexNotFoundException extends ResourceNotFoundException {
/**
* Construct with a custom message.

View File

@ -122,6 +122,11 @@ import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static org.opensearch.common.collect.MapBuilder.newMapBuilder;
/**
* The main OpenSearch index service
*
* @opensearch.internal
*/
public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
private final IndexEventListener eventListener;

View File

@ -69,6 +69,8 @@ import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIEL
* the latest updated settings instance. Classes that need to listen to settings updates can register
* a settings consumer at index creation via {@link IndexModule#addSettingsUpdateConsumer(Setting, Consumer)} that will
* be called for each settings update.
*
* @opensearch.internal
*/
public final class IndexSettings {
public static final Setting<List<String>> DEFAULT_FIELD_SETTING = Setting.listSetting(

View File

@ -69,6 +69,7 @@ import java.util.function.Supplier;
* </li>
* </ul>
*
* @opensearch.internal
**/
public final class IndexSortConfig {
/**

View File

@ -54,6 +54,11 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
/**
* The main OpenSearch index warmer
*
* @opensearch.internal
*/
public final class IndexWarmer {
private static final Logger logger = LogManager.getLogger(IndexWarmer.class);

View File

@ -45,6 +45,11 @@ import org.opensearch.index.stats.IndexingPressureStats;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
* Tracks indexing memory pressure
*
* @opensearch.internal
*/
public class IndexingPressure {
public static final Setting<ByteSizeValue> MAX_INDEXING_BYTES = Setting.memorySizeSetting(

View File

@ -18,6 +18,8 @@ import java.util.function.LongSupplier;
/**
* Sets up classes for node/shard level indexing pressure.
* Provides abstraction and orchestration for indexing pressure interfaces when called from Transport Actions or for Stats.
*
* @opensearch.internal
*/
public class IndexingPressureService {

View File

@ -55,6 +55,11 @@ import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* The indexing slowlog implementation
*
* @opensearch.internal
*/
public final class IndexingSlowLog implements IndexingOperationListener {
public static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(
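
As a usage note (not part of this commit), the thresholds under the index.indexing.slowlog prefix above are ordinary dynamic index settings. A hedged sketch of building them programmatically, where the exact threshold key is assumed from the prefix shown:

import org.opensearch.common.settings.Settings;

final class SlowlogSettingsExample {
    // Sketch: warn when an indexing operation takes longer than 10 seconds.
    // The full key is assumed from INDEX_INDEXING_SLOWLOG_PREFIX above.
    static Settings slowlogThresholds() {
        return Settings.builder()
            .put("index.indexing.slowlog.threshold.index.warn", "10s")
            .build();
    }
}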

View File

@ -121,6 +121,8 @@ import org.opensearch.common.unit.ByteSizeValue;
* indices segments API to see the segments that an index has, and
* possibly either increase the <code>max_merged_segment</code> or issue an optimize
* call for the index (try and aim to issue it on a low traffic time).
*
* @opensearch.internal
*/
public final class MergePolicyConfig {
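
To make the advice above concrete (an illustration, not part of this diff): raising the maximum merged segment size is a plain index-settings change. The key below is assumed from convention rather than spelled out in this excerpt.

import org.opensearch.common.settings.Settings;

final class MergePolicySettingsExample {
    // Sketch: allow natural merges to produce segments of up to 10gb.
    static Settings largerMaxMergedSegment() {
        return Settings.builder()
            .put("index.merge.policy.max_merged_segment", "10gb")
            .build();
    }
}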

View File

@ -63,6 +63,8 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors;
* throttled, while an application doing heavy indexing will see the throttle
* move higher to allow merges to keep up with ongoing indexing.
* </ul>
*
* @opensearch.internal
*/
public final class MergeSchedulerConfig {

View File

@ -44,6 +44,8 @@ import java.util.Map;
* Wrapper around {@link TieredMergePolicy} which doesn't respect
* {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges.
* See https://issues.apache.org/jira/browse/LUCENE-7976.
*
* @opensearch.internal
*/
final class OpenSearchTieredMergePolicy extends FilterMergePolicy {

View File

@ -53,6 +53,11 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
/**
* The search time slow log implementation
*
* @opensearch.internal
*/
public final class SearchSlowLog implements SearchOperationListener {
private static final Charset UTF_8 = Charset.forName("UTF-8");

View File

@ -39,6 +39,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
* 1. Memory Accounting at shard level. This can be enabled/disabled based on dynamic setting.
* 2. Memory Accounting at Node level. Tracking is done using the IndexingPressure artefacts to support seamless feature toggling.
* 3. Interfaces to access the statistics for shard trackers.
*
* @opensearch.internal
*/
public class ShardIndexingPressure extends IndexingPressure {

View File

@ -52,6 +52,7 @@ import java.util.function.ToLongFunction;
* goes above operating_factor.upper of current shard limits. MemoryManager attempts to update the new shard limit such that the new value
* remains within the operating_factor.optimal range of current shard utilization.
*
* @opensearch.internal
*/
public class ShardIndexingPressureMemoryManager {
private static final Logger logger = LogManager.getLogger(ShardIndexingPressureMemoryManager.class);

View File

@ -14,6 +14,8 @@ import org.opensearch.common.settings.Settings;
* This class contains all the settings which are required and owned by {TODO link ShardIndexingPressure}. These will be
* referenced/used in ShardIndexingPressure, as well as its dependent components, i.e.
* {TODO link ShardIndexingPressureMemoryManager} and {TODO link ShardIndexingPressureStore}
*
* @opensearch.internal
*/
public final class ShardIndexingPressureSettings {

View File

@ -44,6 +44,7 @@ import static java.util.Objects.isNull;
* update and evict operations can be abstracted out to support any other strategy such as LRU, if
* a need is discovered later.
*
* @opensearch.internal
*/
public class ShardIndexingPressureStore {

View File

@ -31,6 +31,8 @@ import java.util.concurrent.atomic.AtomicLong;
* increase the complexity of handling shard-lister events and handling other race scenarios such as request-draining etc.
* For simplicity we have modelled this by keeping explicit fields for different operation tracking, while the tracker itself is
* agnostic of the actual shard role.
*
* @opensearch.internal
*/
public class ShardIndexingPressureTracker {

View File

@ -33,6 +33,11 @@ package org.opensearch.index;
import java.util.Locale;
/**
* Levels for the slow logs
*
* @opensearch.internal
*/
public enum SlowLogLevel {
WARN(3), // most specific - little logging
INFO(2),

View File

@ -39,6 +39,11 @@ import org.opensearch.common.lucene.uid.Versions;
import java.io.IOException;
import java.util.Locale;
/**
* Types of index versions
*
* @opensearch.internal
*/
public enum VersionType implements Writeable {
INTERNAL((byte) 0) {
@Override

View File

@ -35,6 +35,11 @@ package org.opensearch.index.analysis;
import org.opensearch.index.AbstractIndexComponent;
import org.opensearch.index.IndexSettings;
/**
* Base character filter factory used in analysis chain
*
* @opensearch.internal
*/
public abstract class AbstractCharFilterFactory extends AbstractIndexComponent implements CharFilterFactory {
private final String name;

View File

@ -37,6 +37,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.index.AbstractIndexComponent;
import org.opensearch.index.IndexSettings;
/**
* Base analyzer component
*
* @opensearch.internal
*/
public abstract class AbstractIndexAnalyzerProvider<T extends Analyzer> extends AbstractIndexComponent implements AnalyzerProvider<T> {
private final String name;

View File

@ -36,6 +36,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.index.AbstractIndexComponent;
import org.opensearch.index.IndexSettings;
/**
* Base tokenfilter factory used in analysis chain
*
* @opensearch.internal
*/
public abstract class AbstractTokenFilterFactory extends AbstractIndexComponent implements TokenFilterFactory {
private final String name;

View File

@ -36,6 +36,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.index.AbstractIndexComponent;
import org.opensearch.index.IndexSettings;
/**
* Base tokenizer factory used in analysis chain
*
* @opensearch.internal
*/
public abstract class AbstractTokenizerFactory extends AbstractIndexComponent implements TokenizerFactory {
private final String name;

View File

@ -88,6 +88,11 @@ import java.util.Set;
import static java.util.Collections.unmodifiableMap;
/**
* Core analysis class
*
* @opensearch.internal
*/
public class Analysis {
public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion) {

View File

@ -36,6 +36,8 @@ package org.opensearch.index.analysis;
* Enum representing the mode in which token filters and analyzers are allowed to operate.
* While most token filters are allowed both in index and search time analyzers, some are
* restricted to be used only at index time, others at search time.
*
* @opensearch.internal
*/
public enum AnalysisMode {

View File

@ -67,6 +67,8 @@ import static java.util.Collections.unmodifiableMap;
/**
* An internal registry for tokenizer, token filter, char filter and analyzer.
* This class exists per node and allows creating per-index {@link IndexAnalyzers} via {@link #build(IndexSettings)}
*
* @opensearch.internal
*/
public final class AnalysisRegistry implements Closeable {
public static final String INDEX_ANALYSIS_CHAR_FILTER = "index.analysis.char_filter";
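
A minimal sketch of the per-index build path described above, assuming an AnalysisRegistry and IndexSettings are already available; only build(IndexSettings) comes from the javadoc, and the lookup by name is an assumption:

import java.io.IOException;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.analysis.AnalysisRegistry;
import org.opensearch.index.analysis.IndexAnalyzers;
import org.opensearch.index.analysis.NamedAnalyzer;

final class AnalysisRegistryExample {
    // Sketch: the node-level registry produces the per-index analyzer view.
    static NamedAnalyzer defaultAnalyzer(AnalysisRegistry registry, IndexSettings indexSettings) throws IOException {
        IndexAnalyzers indexAnalyzers = registry.build(indexSettings);
        return indexAnalyzers.get("default"); // assumed lookup by analyzer name
    }
}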

View File

@ -41,6 +41,8 @@ import java.util.Map;
/**
* A class that groups analysis components necessary to produce a custom analyzer.
* See {@link ReloadableCustomAnalyzer} for an example usage.
*
* @opensearch.internal
*/
public final class AnalyzerComponents {

View File

@ -34,6 +34,8 @@ package org.opensearch.index.analysis;
/**
* Analyzers that provide access to their token filters should implement this
*
* @opensearch.internal
*/
public interface AnalyzerComponentsProvider {

View File

@ -35,6 +35,11 @@ package org.opensearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.opensearch.common.inject.Provider;
/**
* Base interface for all analyzer providers
*
* @opensearch.internal
*/
public interface AnalyzerProvider<T extends Analyzer> extends Provider<T> {
String name();

View File

@ -32,6 +32,11 @@
package org.opensearch.index.analysis;
/**
* Enum to identify the scope of an analyzer
*
* @opensearch.internal
*/
public enum AnalyzerScope {
INDEX,
INDICES,

View File

@ -34,6 +34,11 @@ package org.opensearch.index.analysis;
import java.io.Reader;
/**
* Base character filter factory behavior used in analysis chain
*
* @opensearch.internal
*/
public interface CharFilterFactory {
String name();

View File

@ -39,6 +39,11 @@ import org.opensearch.common.util.CollectionUtils;
import java.io.Reader;
/**
* Custom analyzer chain
*
* @opensearch.internal
*/
public final class CustomAnalyzer extends Analyzer implements AnalyzerComponentsProvider {
private final AnalyzerComponents components;

View File

@ -44,6 +44,8 @@ import static org.opensearch.index.analysis.AnalyzerComponents.createComponents;
/**
* A custom analyzer that is built out of a single {@link org.apache.lucene.analysis.Tokenizer} and a list
* of {@link org.apache.lucene.analysis.TokenFilter}s.
*
* @opensearch.internal
*/
public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> {
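
For orientation (not part of this diff), a custom analyzer of the kind this provider builds is normally declared through index analysis settings. A hedged sketch using conventional keys and filter names:

import org.opensearch.common.settings.Settings;

final class CustomAnalyzerSettingsExample {
    // Sketch: a "custom" analyzer assembled from one tokenizer plus a list of token
    // filters, mirroring the Tokenizer + TokenFilter composition described above.
    static Settings myCustomAnalyzer() {
        return Settings.builder()
            .put("index.analysis.analyzer.my_custom.type", "custom")
            .put("index.analysis.analyzer.my_custom.tokenizer", "standard")
            .putList("index.analysis.analyzer.my_custom.filter", "lowercase", "asciifolding")
            .build();
    }
}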

View File

@ -43,6 +43,8 @@ import java.util.Map;
* A custom normalizer that is built out of char and token filters. In contrast
* to analyzers, it does not support tokenizers and only supports a
* subset of char and token filters.
*
* @opensearch.internal
*/
public final class CustomNormalizerProvider extends AbstractIndexAnalyzerProvider<CustomAnalyzer> {

View File

@ -38,6 +38,11 @@ import org.opensearch.common.collect.CopyOnWriteHashMap;
import java.util.Map;
/**
* Analysis chain for field names
*
* @opensearch.internal
*/
public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper {
private final Map<String, Analyzer> analyzers;

View File

@ -40,6 +40,11 @@ import org.opensearch.indices.analysis.HunspellService;
import java.util.Locale;
/**
* The token filter factory for the hunspell analyzer
*
* @opensearch.internal
*/
public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
private final Dictionary dictionary;

View File

@ -51,6 +51,8 @@ import static org.opensearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOT
* access to individual tokenizers, char or token filter.
*
* @see AnalysisRegistry
*
* @opensearch.internal
*/
public final class IndexAnalyzers implements Closeable {
private final Map<String, NamedAnalyzer> analyzers;

View File

@ -37,6 +37,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Provider for the keyword analyzer
*
* @opensearch.internal
*/
public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider<KeywordAnalyzer> {
private final KeywordAnalyzer keywordAnalyzer;

View File

@ -38,7 +38,11 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
/**
* Normalizer used to lowercase values
*
* @opensearch.internal
*/
public final class LowercaseNormalizer extends Analyzer {
@Override

View File

@ -38,6 +38,8 @@ import org.opensearch.index.IndexSettings;
/**
* Builds an analyzer for normalization that lowercases terms.
*
* @opensearch.internal
*/
public class LowercaseNormalizerProvider extends AbstractIndexAnalyzerProvider<LowercaseNormalizer> {

View File

@ -45,6 +45,11 @@ import java.io.IOException;
import java.util.Map;
import java.util.Objects;
/**
* Provides the name and settings for an analyzer
*
* @opensearch.internal
*/
public class NameOrDefinition implements Writeable, ToXContentFragment {
// exactly one of these two members is not null
public final String name;

View File

@ -43,6 +43,8 @@ import java.util.Objects;
/**
* Named analyzer is an analyzer wrapper around an actual analyzer ({@link #analyzer}) that is associated
* with a name ({@link #name()}).
*
* @opensearch.internal
*/
public class NamedAnalyzer extends DelegatingAnalyzerWrapper {

View File

@ -39,6 +39,8 @@ import java.io.Reader;
*
* The default implementation of {@link #normalize(Reader)} delegates to
* {@link #create(Reader)}
*
* @opensearch.internal
*/
public interface NormalizingCharFilterFactory extends CharFilterFactory {

View File

@ -39,6 +39,8 @@ import org.apache.lucene.analysis.TokenStream;
*
* The default implementation delegates {@link #normalize(TokenStream)} to
* {@link #create(TokenStream)}}.
*
* @opensearch.internal
*/
public interface NormalizingTokenFilterFactory extends TokenFilterFactory {
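
A minimal sketch (assumed, not taken from this commit) of a factory that relies on the default normalize-to-create delegation described above; Lucene's LowerCaseFilter is just a stand-in filter:

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.opensearch.index.analysis.NormalizingTokenFilterFactory;

final class LowercasingTokenFilterFactory implements NormalizingTokenFilterFactory {
    @Override
    public String name() {
        return "example_lowercase"; // hypothetical registration name
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        return new LowerCaseFilter(tokenStream);
    }
    // normalize(TokenStream) is inherited and simply delegates to create(...)
}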

View File

@ -34,6 +34,11 @@ package org.opensearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
/**
* Provider for all prebuilt analyzers
*
* @opensearch.internal
*/
public class PreBuiltAnalyzerProvider implements AnalyzerProvider<NamedAnalyzer> {
private final NamedAnalyzer analyzer;

View File

@ -49,6 +49,11 @@ import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Factory to create the providers for all prebuilt analyzers
*
* @opensearch.internal
*/
public class PreBuiltAnalyzerProviderFactory extends PreConfiguredAnalysisComponent<AnalyzerProvider<?>> implements Closeable {
private final Function<Version, Analyzer> create;

View File

@ -43,6 +43,8 @@ import java.io.IOException;
/**
* Shared implementation for pre-configured analysis components.
*
* @opensearch.internal
*/
public abstract class PreConfiguredAnalysisComponent<T> implements AnalysisModule.AnalysisProvider<T> {
protected final String name;

View File

@ -43,6 +43,8 @@ import java.util.function.Function;
/**
* Provides pre-configured, shared {@link CharFilter}s.
*
* @opensearch.internal
*/
public class PreConfiguredCharFilter extends PreConfiguredAnalysisComponent<CharFilterFactory> {
/**

View File

@ -43,6 +43,8 @@ import java.util.function.Function;
/**
* Provides pre-configured, shared {@link TokenFilter}s.
*
* @opensearch.internal
*/
public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisComponent<TokenFilterFactory> {
/**

View File

@ -42,6 +42,8 @@ import java.util.function.Supplier;
/**
* Provides pre-configured, shared {@link Tokenizer}s.
*
* @opensearch.internal
*/
public final class PreConfiguredTokenizer extends PreConfiguredAnalysisComponent<TokenizerFactory> {
/**

View File

@ -42,6 +42,11 @@ import org.opensearch.common.util.CollectionUtils;
import java.io.Reader;
import java.util.Map;
/**
* A custom analyzer that is reloadable
*
* @opensearch.internal
*/
public final class ReloadableCustomAnalyzer extends Analyzer implements AnalyzerComponentsProvider {
private volatile AnalyzerComponents components;

View File

@ -41,6 +41,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Factory for shingle analyzer token filters
*
* @opensearch.internal
*/
public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ShingleTokenFilterFactory.class);

View File

@ -37,6 +37,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Provider class for the simple analyzer
*
* @opensearch.internal
*/
public class SimpleAnalyzerProvider extends AbstractIndexAnalyzerProvider<SimpleAnalyzer> {
private final SimpleAnalyzer simpleAnalyzer;

View File

@ -38,6 +38,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Provider for the standard analyzer
*
* @opensearch.internal
*/
public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardAnalyzer> {
private final StandardAnalyzer standardAnalyzer;

View File

@ -39,6 +39,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Tokenizer factory for the standard analyzer
*
* @opensearch.internal
*/
public class StandardTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;

View File

@ -39,6 +39,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Provider class for the stop word analyzer
*
* @opensearch.internal
*/
public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider<StopAnalyzer> {
private final StopAnalyzer stopAnalyzer;

View File

@ -43,6 +43,11 @@ import org.opensearch.index.IndexSettings;
import java.util.Set;
/**
* Token filter factory for the stop word analyzer
*
* @opensearch.internal
*/
public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet stopWords;

View File

@ -39,6 +39,11 @@ import org.opensearch.search.fetch.subphase.highlight.FastVectorHighlighter;
import java.util.List;
import java.util.function.Function;
/**
* Base token filter factory used in analysis chain
*
* @opensearch.internal
*/
public interface TokenFilterFactory {
String name();

View File

@ -36,6 +36,11 @@ import org.apache.lucene.analysis.Tokenizer;
import java.util.function.Supplier;
/**
* Base tokenizer factory used in analysis chain
*
* @opensearch.internal
*/
public interface TokenizerFactory {
String name();

View File

@ -37,6 +37,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
/**
* Provider class for the whitespace analyzer
*
* @opensearch.internal
*/
public class WhitespaceAnalyzerProvider extends AbstractIndexAnalyzerProvider<WhitespaceAnalyzer> {
private final WhitespaceAnalyzer analyzer;

View File

@ -41,6 +41,11 @@ import org.opensearch.index.cache.query.QueryCache;
import java.io.Closeable;
import java.io.IOException;
/**
* Wrapping class for the index cache
*
* @opensearch.internal
*/
public class IndexCache extends AbstractIndexComponent implements Closeable {
private final QueryCache queryCache;

View File

@ -84,6 +84,8 @@ import java.util.concurrent.Executor;
* Use this cache with care, only components that require that a filter is to be materialized as a {@link BitDocIdSet}
* and require that it should always be around should use this cache, otherwise the
* {@link org.opensearch.index.cache.query.QueryCache} should be used instead.
*
* @opensearch.internal
*/
public final class BitsetFilterCache extends AbstractIndexComponent
implements

View File

@ -37,6 +37,11 @@ import org.opensearch.index.IndexSettings;
import org.opensearch.index.shard.AbstractIndexShardComponent;
import org.opensearch.index.shard.ShardId;
/**
* Bitset Filter Cache for shards
*
* @opensearch.internal
*/
public class ShardBitsetFilterCache extends AbstractIndexShardComponent {
private final CounterMetric totalMetric = new CounterMetric();

View File

@ -37,6 +37,11 @@ import org.apache.lucene.search.Weight;
import org.opensearch.index.AbstractIndexComponent;
import org.opensearch.index.IndexSettings;
/**
* No-op implementation of a disabled query cache
*
* @opensearch.internal
*/
public class DisabledQueryCache extends AbstractIndexComponent implements QueryCache {
public DisabledQueryCache(IndexSettings indexSettings) {

View File

@ -42,6 +42,8 @@ import org.opensearch.indices.IndicesQueryCache;
/**
* The index-level query cache. This class mostly delegates to the node-level
* query cache: {@link IndicesQueryCache}.
*
* @opensearch.internal
*/
public class IndexQueryCache extends AbstractIndexComponent implements QueryCache {

View File

@ -36,6 +36,11 @@ import org.opensearch.index.IndexComponent;
import java.io.Closeable;
/**
* Base interface for a query cache
*
* @opensearch.internal
*/
public interface QueryCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache {
void clear(String reason);

View File

@ -43,6 +43,11 @@ import org.opensearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Stats for the query cache
*
* @opensearch.internal
*/
public class QueryCacheStats implements Writeable, ToXContentFragment {
private long ramBytesUsed;

View File

@ -41,6 +41,11 @@ import org.opensearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Stats for the request cache
*
* @opensearch.internal
*/
public class RequestCacheStats implements Writeable, ToXContentFragment {
private long memorySize;

View File

@ -38,6 +38,8 @@ import org.opensearch.common.metrics.CounterMetric;
/**
* Tracks the portion of the request cache in use for a particular shard.
*
* @opensearch.internal
*/
public final class ShardRequestCache {

View File

@ -47,6 +47,8 @@ import java.util.Map;
* codec layer that allows to use use-case specific file formats &amp;
* data-structures per field. OpenSearch exposes the full
* {@link Codec} capabilities through this {@link CodecService}.
*
* @opensearch.internal
*/
public class CodecService {

View File

@ -17,6 +17,8 @@ import java.util.Objects;
/**
* The configuration parameters necessary for the {@link CodecService} instance construction.
*
* @opensearch.internal
*/
public final class CodecServiceConfig {
private final IndexSettings indexSettings;

View File

@ -10,6 +10,8 @@ package org.opensearch.index.codec;
/**
* A factory for creating new {@link CodecService} instance
*
* @opensearch.internal
*/
@FunctionalInterface
public interface CodecServiceFactory {

View File

@ -50,6 +50,8 @@ import org.opensearch.index.mapper.MapperService;
* allows users to change the low level postings format for individual fields
* per index in real time via the mapping API. If no specific postings format is
* configured for a specific field the default postings format is used.
*
* @opensearch.internal
*/
public class PerFieldMappingPostingFormatCodec extends Lucene91Codec {
private final Logger logger;

View File

@ -56,6 +56,8 @@ import java.util.function.LongSupplier;
* <p>
* In particular, this policy will delete index commits whose max sequence number is at most
* the current global checkpoint except the index commit which has the highest max sequence number among those.
*
* @opensearch.internal
*/
public class CombinedDeletionPolicy extends IndexDeletionPolicy {
private final Logger logger;
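
To illustrate the retention rule with made-up numbers: if a shard has commits whose max sequence numbers are 10, 20 and 30 while the current global checkpoint is 25, then commits 10 and 20 fall at or below the checkpoint; commit 20, the highest of those, is kept as the safe commit, commit 10 becomes eligible for deletion, and commit 30 is retained because it is beyond the checkpoint.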

View File

@ -41,6 +41,11 @@ import org.opensearch.index.mapper.VersionFieldMapper;
import java.io.IOException;
import java.util.Objects;
/**
* Doc Values used in LuceneChangesSnapshot for recovery
*
* @opensearch.internal
*/
final class CombinedDocValues {
private final NumericDocValues versionDV;
private final NumericDocValues seqNoDV;

View File

@ -44,7 +44,11 @@ import java.io.IOException;
import java.util.Base64;
import java.util.Map;
/**
* A class that returns dynamic information with respect to the last commit point of this shard
*
* @opensearch.internal
*/
public final class CommitStats implements Writeable, ToXContentFragment {
private final Map<String, String> userData;

View File

@ -49,6 +49,11 @@ import org.opensearch.search.suggest.completion.CompletionStats;
import java.util.function.Supplier;
/**
* Cache to store engine completion stats
*
* @opensearch.internal
*/
class CompletionStatsCache implements ReferenceManager.RefreshListener {
private final Supplier<Engine.Searcher> searcherSupplier;

View File

@ -34,7 +34,11 @@ package org.opensearch.index.engine;
import org.apache.lucene.util.RamUsageEstimator;
/**
* Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion.
*
* @opensearch.internal
*/
final class DeleteVersionValue extends VersionValue {

View File

@ -37,6 +37,11 @@ import org.opensearch.rest.RestStatus;
import java.io.IOException;
/**
* Exception thrown if a document is missing
*
* @opensearch.internal
*/
public class DocumentMissingException extends EngineException {
public DocumentMissingException(ShardId shardId, String id) {

View File

@ -37,6 +37,11 @@ import org.opensearch.rest.RestStatus;
import java.io.IOException;
/**
* Exception thrown if a document source is missing
*
* @opensearch.internal
*/
public class DocumentSourceMissingException extends EngineException {
public DocumentSourceMissingException(ShardId shardId, String id) {

View File

@ -112,6 +112,11 @@ import java.util.stream.Stream;
import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
/**
* Base OpenSearch Engine class
*
* @opensearch.internal
*/
public abstract class Engine implements Closeable {
public static final String SYNC_COMMIT_ID = "sync_id"; // TODO: remove sync_id in 3.0

View File

@ -62,10 +62,12 @@ import java.util.Objects;
import java.util.function.LongSupplier;
import java.util.function.Supplier;
/**
* Holds all the configuration that is used to create an {@link Engine}.
* Once {@link Engine} has been created with this object, changes to this
* object will affect the {@link Engine} instance.
*
* @opensearch.internal
*/
public final class EngineConfig {
private final ShardId shardId;

View File

@ -42,6 +42,8 @@ import java.util.function.Supplier;
/**
* A factory to create an EngineConfig based on custom plugin overrides
*
* @opensearch.internal
*/
public class EngineConfigFactory {
private final CodecServiceFactory codecServiceFactory;

View File

@ -40,7 +40,7 @@ import java.io.IOException;
/**
* An exception indicating that an {@link Engine} creation failed.
*
* @opensearch.internal
*/
public class EngineCreationFailureException extends EngineException {

View File

@ -38,6 +38,11 @@ import org.opensearch.index.shard.ShardId;
import java.io.IOException;
/**
* Exception if there are any errors in the engine
*
* @opensearch.internal
*/
public class EngineException extends OpenSearchException {
public EngineException(ShardId shardId, String msg, Object... params) {

View File

@ -33,6 +33,8 @@ package org.opensearch.index.engine;
/**
* Simple Engine Factory
*
* @opensearch.internal
*/
@FunctionalInterface
public interface EngineFactory {
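
Because EngineFactory is a @FunctionalInterface whose newReadWriteEngine(EngineConfig) method appears further down in this diff, it can be supplied as a lambda. A hedged sketch that simply wires up the default engine:

import org.opensearch.index.engine.EngineConfig;
import org.opensearch.index.engine.EngineFactory;
import org.opensearch.index.engine.InternalEngine;

final class EngineFactoryExample {
    // Sketch: the stock read/write engine expressed through the functional interface.
    // A plugin would typically return its own Engine subclass here instead.
    static EngineFactory defaultFactory() {
        return (EngineConfig config) -> new InternalEngine(config);
    }
}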

View File

@ -37,6 +37,11 @@ import org.opensearch.index.shard.ShardId;
import java.io.IOException;
/**
* Exception thrown if there is an error on flush
*
* @opensearch.internal
*/
public class FlushFailedEngineException extends EngineException {
public FlushFailedEngineException(ShardId shardId, Throwable t) {

View File

@ -37,6 +37,11 @@ import org.opensearch.index.translog.Translog;
import java.util.Objects;
/**
* Encapsulates an Index Version in the translog
*
* @opensearch.internal
*/
final class IndexVersionValue extends VersionValue {
private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexVersionValue.class);

View File

@ -136,6 +136,11 @@ import java.util.function.LongSupplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* The default internal engine (can be overridden by plugins)
*
* @opensearch.internal
*/
public class InternalEngine extends Engine {
/**

View File

@ -32,6 +32,11 @@
package org.opensearch.index.engine;
/**
* Factory for creating an InternalEngine instance
*
* @opensearch.internal
*/
public class InternalEngineFactory implements EngineFactory {
@Override
public Engine newReadWriteEngine(EngineConfig config) {

View File

@ -46,7 +46,11 @@ import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
/**
* Maps _uid value to its version information.
*
* @opensearch.internal
*/
final class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
private final KeyedLock<BytesRef> keyedLock = new KeyedLock<>();

View File

@ -64,6 +64,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
/**
* A {@link Translog.Snapshot} from changes in a Lucene index
*
* @opensearch.internal
*/
final class LuceneChangesSnapshot implements Translog.Snapshot {
static final int DEFAULT_BATCH_SIZE = 1024;

View File

@ -35,6 +35,8 @@ package org.opensearch.index.engine;
/**
* Exception indicating that not all requested operations from {@link LuceneChangesSnapshot}
* are available.
*
* @opensearch.internal
*/
public final class MissingHistoryOperationsException extends IllegalStateException {

View File

@ -61,6 +61,8 @@ import java.util.function.Function;
* index, get), throw {@link UnsupportedOperationException}. However, NoOpEngine
* allows trimming any existing translog files through the usage of the
* {@link #trimUnreferencedTranslogFiles()} method.
*
* @opensearch.internal
*/
public final class NoOpEngine extends ReadOnlyEngine {

View File

@ -59,6 +59,8 @@ import java.util.Set;
/**
* An extension to the {@link ConcurrentMergeScheduler} that provides tracking on merge times, total
* and current merges.
*
* @opensearch.internal
*/
class OpenSearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {

Some files were not shown because too many files have changed in this diff.