[Javadocs] add to o.o.dfs,fetch,internal,lookup,profile, and query packages (#3261)

Adds class-level Javadocs to the org.opensearch.search.dfs, fetch, internal, lookup,
profile, and query packages.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize 2022-05-09 14:05:59 -05:00 committed by GitHub
parent 625623f932
commit 1118dcf372
118 changed files with 418 additions and 3 deletions


@ -44,6 +44,11 @@ import org.opensearch.common.io.stream.Writeable;
import java.io.IOException;
/**
* Computes global distributed term frequencies across the index
*
* @opensearch.internal
*/
public class AggregatedDfs implements Writeable {
private ObjectObjectHashMap<Term, TermStatistics> termStatistics;


@ -51,6 +51,8 @@ import java.util.Map;
/**
* Dfs phase of a search request, used to make scoring 100% accurate by collecting additional info from each shard before the query phase.
* The additional information is used to better compare the scores coming from all the shards, which depend on local factors (e.g. idf)
*
* @opensearch.internal
*/
public class DfsPhase {
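
To make the idf point above concrete, here is a minimal, self-contained sketch (not code from this commit); the class name, the document counts, and the use of a BM25-style idf formula are assumptions made for illustration.

```java
// Illustrative only: per-shard ("local") idf can disagree with the idf computed
// from the statistics that the DFS phase aggregates and redistributes to shards.
public final class DfsIdfExample {

    // BM25-style idf: log(1 + (N - df + 0.5) / (df + 0.5))
    static double idf(long docFreq, long docCount) {
        return Math.log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5));
    }

    public static void main(String[] args) {
        double shardA = idf(2, 1_000);        // term is rare on shard A
        double shardB = idf(400, 1_000);      // term is common on shard B
        double global = idf(402, 2_000);      // aggregated across both shards
        System.out.printf("local A=%.3f, local B=%.3f, global=%.3f%n", shardA, shardB, global);
        // Without the DFS phase, each shard scores with its local idf, so hits
        // from shard A would look artificially more relevant than hits from shard B.
    }
}
```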


@ -38,6 +38,11 @@ import org.opensearch.search.SearchShardTarget;
import java.io.IOException;
/**
* Thrown if there are any errors in the DFS phase
*
* @opensearch.internal
*/
public class DfsPhaseExecutionException extends SearchException {
public DfsPhaseExecutionException(SearchShardTarget shardTarget, String msg, Throwable t) {


@ -49,6 +49,11 @@ import org.opensearch.search.internal.ShardSearchRequest;
import java.io.IOException;
/**
* Result from the Distributed Frequency Search (DFS) phase
*
* @opensearch.internal
*/
public class DfsSearchResult extends SearchPhaseResult {
private static final Term[] EMPTY_TERMS = new Term[0];


@ -57,6 +57,8 @@ import java.util.List;
/**
* Encapsulates state required to execute fetch phases
*
* @opensearch.internal
*/
public class FetchContext {


@ -90,6 +90,8 @@ import static java.util.Collections.emptyMap;
/**
* Fetch phase of a search request, used to fetch the actual top matching documents to be returned to the client, identified
* after reducing all of the matches returned by the query phase
*
* @opensearch.internal
*/
public class FetchPhase {
private static final Logger LOGGER = LogManager.getLogger(FetchPhase.class);


@ -38,6 +38,11 @@ import org.opensearch.search.SearchShardTarget;
import java.io.IOException;
/**
* Thrown when there are errors during the fetch phase
*
* @opensearch.internal
*/
public class FetchPhaseExecutionException extends SearchException {
public FetchPhaseExecutionException(SearchShardTarget shardTarget, String msg, Throwable t) {


@ -43,6 +43,11 @@ import org.opensearch.search.query.QuerySearchResult;
import java.io.IOException;
/**
* Result from a fetch
*
* @opensearch.internal
*/
public final class FetchSearchResult extends SearchPhaseResult {
private SearchHits hits;


@ -42,6 +42,8 @@ import java.io.IOException;
/**
* Sub phase within the fetch phase used to fetch things *about* the documents like highlighting or matched queries.
*
* @opensearch.internal
*/
public interface FetchSubPhase {


@ -39,6 +39,8 @@ import java.io.IOException;
/**
* Executes the logic for a {@link FetchSubPhase} against a particular leaf reader and hit
*
* @opensearch.internal
*/
public interface FetchSubPhaseProcessor {


@ -41,6 +41,11 @@ import org.opensearch.search.query.QuerySearchResult;
import java.io.IOException;
/**
* Query fetch result
*
* @opensearch.internal
*/
public final class QueryFetchSearchResult extends SearchPhaseResult {
private final QuerySearchResult queryResult;


@ -40,6 +40,11 @@ import org.opensearch.search.query.QuerySearchResult;
import java.io.IOException;
/**
* Scroll fetch result
*
* @opensearch.internal
*/
public final class ScrollQueryFetchSearchResult extends SearchPhaseResult {
private final QueryFetchSearchResult result;


@ -54,6 +54,8 @@ import java.util.Map;
/**
* Shard level fetch base request. Holds all the info needed to execute a fetch.
* Used with search scroll as the original request doesn't hold indices.
*
* @opensearch.internal
*/
public class ShardFetchRequest extends TransportRequest {


@ -50,6 +50,8 @@ import java.io.IOException;
/**
* Shard level fetch request used with search. Holds indices taken from the original search request
* and implements {@link org.opensearch.action.IndicesRequest}.
*
* @opensearch.internal
*/
public class ShardFetchSearchRequest extends ShardFetchRequest implements IndicesRequest {


@ -50,6 +50,8 @@ import java.util.Objects;
/**
* Context used to fetch the {@code stored_fields}.
*
* @opensearch.internal
*/
public class StoredFieldsContext implements Writeable {
public static final String _NONE_ = "_none_";
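
For context, the stored_fields option that a StoredFieldsContext carries can be set from the request side roughly as below; the index and field names are made up, and the builder calls assume the usual SearchSourceBuilder API.

```java
import java.util.Arrays;

import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

// Hedged sketch: requesting stored fields, which the shard-level fetch resolves
// through a StoredFieldsContext. Fields must be marked as stored in the mapping.
public final class StoredFieldsExample {
    public static SearchRequest build() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchAllQuery())
            .storedFields(Arrays.asList("title", "publish_date"));
        return new SearchRequest("articles").source(source);
    }
}
```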


@ -42,6 +42,8 @@ import java.io.IOException;
/**
* Explains the scoring calculations for the top hits.
*
* @opensearch.internal
*/
public final class ExplainPhase implements FetchSubPhase {


@ -41,6 +41,8 @@ import java.util.function.Function;
/**
* All the required context to pull a field from the doc values.
*
* @opensearch.internal
*/
public class FetchDocValuesContext {
private final List<FieldAndFormat> fields;


@ -49,6 +49,8 @@ import java.util.List;
* Fetch sub phase which pulls data from doc values.
*
* Specifying {@code "docvalue_fields": ["field1", "field2"]}
*
* @opensearch.internal
*/
public final class FetchDocValuesPhase implements FetchSubPhase {
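
A request-side sketch of the docvalue_fields option mentioned above, assuming the usual SearchSourceBuilder setters; the index, field names, and format are illustrative.

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

// Hedged sketch: the doc-value fields requested here are what FetchDocValuesPhase
// loads for each hit during the fetch phase.
public final class DocValueFieldsExample {
    public static SearchRequest build() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchAllQuery())
            .docValueField("price")                        // default format
            .docValueField("created_at", "epoch_millis");  // explicit format
        return new SearchRequest("products").source(source);
    }
}
```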


@ -35,6 +35,8 @@ import java.util.List;
/**
* The context needed to retrieve fields.
*
* @opensearch.internal
*/
public class FetchFieldsContext {
private final List<FieldAndFormat> fields;


@ -50,6 +50,8 @@ import java.util.Set;
/**
* A fetch sub-phase for high-level field retrieval. Given a list of fields, it
* retrieves the field values from _source and returns them as document fields.
*
* @opensearch.internal
*/
public final class FetchFieldsPhase implements FetchSubPhase {


@ -44,6 +44,11 @@ import org.opensearch.search.fetch.FetchSubPhaseProcessor;
import java.io.IOException;
/**
* Fetches the score of a query match during the search phase
*
* @opensearch.internal
*/
public class FetchScorePhase implements FetchSubPhase {
@Override


@ -54,6 +54,8 @@ import java.util.function.Function;
/**
* Context used to fetch the {@code _source}.
*
* @opensearch.internal
*/
public class FetchSourceContext implements Writeable, ToXContentObject {
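
A hedged request-side sketch of the _source filtering that ends up captured in a FetchSourceContext; the index and field patterns are made up, and the builder calls assume the usual SearchSourceBuilder API.

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

// Hedged sketch: include/exclude patterns for _source; the fetch phase applies
// them when building each hit's source.
public final class SourceFilteringExample {
    public static SearchRequest build() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchAllQuery())
            .fetchSource(
                new String[] { "title", "author.*" },  // includes
                new String[] { "author.email" }        // excludes
            );
        return new SearchRequest("books").source(source);
    }
}
```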


@ -46,6 +46,11 @@ import org.opensearch.search.lookup.SourceLookup;
import java.io.IOException;
import java.util.Map;
/**
* Fetches the document source during the search phase
*
* @opensearch.internal
*/
public final class FetchSourcePhase implements FetchSubPhase {
@Override


@ -41,6 +41,11 @@ import org.opensearch.search.fetch.FetchSubPhaseProcessor;
import java.io.IOException;
/**
* Fetches the document version during the search phase
*
* @opensearch.internal
*/
public final class FetchVersionPhase implements FetchSubPhase {
@Override


@ -49,6 +49,8 @@ import java.util.Objects;
/**
* Wrapper around a field name and the format that should be used to
* display values of this field.
*
* @opensearch.internal
*/
public final class FieldAndFormat implements Writeable, ToXContentObject {
private static final ParseField FIELD_FIELD = new ParseField("field");


@ -51,6 +51,8 @@ import java.util.Set;
/**
* A helper class to {@link FetchFieldsPhase} that's initialized with a list of field patterns to fetch.
* Then given a specific document, it can retrieve the corresponding fields from the document's source.
*
* @opensearch.internal
*/
public class FieldFetcher {
public static FieldFetcher create(QueryShardContext context, SearchLookup searchLookup, Collection<FieldAndFormat> fieldAndFormats) {


@ -58,6 +58,8 @@ import java.util.Objects;
/**
* Context used for inner hits retrieval
*
* @opensearch.internal
*/
public final class InnerHitsContext {
private final Map<String, InnerHitSubContext> innerHits;


@ -49,6 +49,11 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* Gets the inner hits of a document during search
*
* @opensearch.internal
*/
public final class InnerHitsPhase implements FetchSubPhase {
private final FetchPhase fetchPhase;


@ -48,6 +48,11 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Fetches the queries that matched the document during the search phase
*
* @opensearch.internal
*/
public final class MatchedQueriesPhase implements FetchSubPhase {
@Override


@ -37,6 +37,11 @@ import org.opensearch.script.FieldScript;
import java.util.ArrayList;
import java.util.List;
/**
* Context used for script fields
*
* @opensearch.internal
*/
public class ScriptFieldsContext {
public static class ScriptField {


@ -45,6 +45,11 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
* Gets the scripted fields during the search phase
*
* @opensearch.internal
*/
public final class ScriptFieldsPhase implements FetchSubPhase {
@Override


@ -41,6 +41,11 @@ import org.opensearch.search.fetch.FetchSubPhaseProcessor;
import java.io.IOException;
/**
* Fetches the sequence number and primary term during the search phase
*
* @opensearch.internal
*/
public final class SeqNoPrimaryTermPhase implements FetchSubPhase {
@Override


@ -62,6 +62,8 @@ import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBui
/**
* This abstract class holds parameters shared by {@link HighlightBuilder} and {@link HighlightBuilder.Field}
* and provides the common setters, equality, hashCode calculation and common serialization
*
* @opensearch.internal
*/
public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterBuilder<?>>
implements


@ -43,6 +43,11 @@ import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
import java.io.IOException;
import java.util.Map;
/**
* Internally used for custom scoring
*
* @opensearch.internal
*/
public final class CustomQueryScorer extends QueryScorer {
public CustomQueryScorer(Query query, IndexReader reader, String field, String defaultField) {


@ -64,6 +64,11 @@ import java.util.Locale;
import java.util.Map;
import java.util.function.Function;
/**
* Uses Lucene's Fast Vector Highlighting
*
* @opensearch.internal
*/
public class FastVectorHighlighter implements Highlighter {
private static final BoundaryScanner DEFAULT_SIMPLE_BOUNDARY_SCANNER = new SimpleBoundaryScanner();
private static final BoundaryScanner DEFAULT_SENTENCE_BOUNDARY_SCANNER = new BreakIteratorBoundaryScanner(


@ -38,6 +38,11 @@ import org.opensearch.search.fetch.FetchSubPhase;
import java.util.Map;
/**
* Context used during field level highlighting
*
* @opensearch.internal
*/
public class FieldHighlightContext {
public final String fieldName;


@ -49,6 +49,8 @@ import java.util.List;
/**
* Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implementations.
*
* @opensearch.internal
*/
public final class FragmentBuilderHelper {


@ -67,6 +67,8 @@ import static org.opensearch.common.xcontent.ObjectParser.fromList;
* are summarized to show only selected snippets ("fragments") containing search terms.
*
* @see org.opensearch.search.builder.SearchSourceBuilder#highlight()
*
* @opensearch.internal
*/
public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilder> {
/** default for whether to highlight fields based on the source even if stored separately */
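
A hedged sketch of how a HighlightBuilder is attached to a search request via SearchSourceBuilder; the index, field, query text, and tags are illustrative, not taken from the commit.

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder;

// Hedged sketch: fragments of the "body" field that match the query are
// returned highlighted with the given pre/post tags.
public final class HighlightExample {
    public static SearchRequest build() {
        HighlightBuilder highlight = new HighlightBuilder()
            .field("body")
            .preTags("<em>")
            .postTags("</em>");
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchQuery("body", "distributed frequencies"))
            .highlighter(highlight);
        return new SearchRequest("articles").source(source);
    }
}
```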


@ -51,6 +51,8 @@ import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedT
/**
* A field highlighted with its highlighted fragments.
*
* @opensearch.internal
*/
public class HighlightField implements ToXContentFragment, Writeable {


@ -51,6 +51,11 @@ import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
/**
* Highlight Phase of the search request.
*
* @opensearch.internal
*/
public class HighlightPhase implements FetchSubPhase {
private final Map<String, Highlighter> highlighters;


@ -46,6 +46,11 @@ import java.util.List;
import static java.util.Collections.singleton;
/**
* Utility class used during the highlight phase of the search request.
*
* @opensearch.internal
*/
public final class HighlightUtils {
// U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting (unified highlighter)


@ -37,6 +37,8 @@ import java.io.IOException;
/**
* Highlights a search result.
*
* @opensearch.internal
*/
public interface Highlighter {


@ -61,6 +61,11 @@ import java.util.Map;
import static org.opensearch.search.fetch.subphase.highlight.UnifiedHighlighter.convertFieldValue;
/**
* Standard Lucene Highlighter implementation
*
* @opensearch.internal
*/
public class PlainHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-plain";


@ -43,6 +43,11 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
/**
* Search context used during highlighting phase
*
* @opensearch.internal
*/
public class SearchHighlightContext {
private final Map<String, Field> fields;


@ -40,6 +40,8 @@ import org.opensearch.index.mapper.MappedFieldType;
/**
* Direct subclass of Lucene's org.apache.lucene.search.vectorhighlight.SimpleFragmentsBuilder
* that corrects offsets for broken analysis chains.
*
* @opensearch.internal
*/
public class SimpleFragmentsBuilder extends org.apache.lucene.search.vectorhighlight.SimpleFragmentsBuilder {
protected final MappedFieldType fieldType;


@ -44,6 +44,11 @@ import org.opensearch.search.lookup.SourceLookup;
import java.io.IOException;
import java.util.List;
/**
* Internal fragments builder that builds score-ordered highlight fragments from the document source
*
* @opensearch.internal
*/
public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder {
private final MappedFieldType fieldType;


@ -41,6 +41,11 @@ import org.opensearch.search.lookup.SourceLookup;
import java.io.IOException;
import java.util.List;
/**
* Internal builder used during simple highlighting
*
* @opensearch.internal
*/
public class SourceSimpleFragmentsBuilder extends SimpleFragmentsBuilder {
private final SourceLookup sourceLookup;


@ -66,6 +66,11 @@ import java.util.stream.Collectors;
import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
/**
* Uses Lucene's unified highlighter implementation
*
* @opensearch.internal
*/
public class UnifiedHighlighter implements Highlighter {
@Override
public boolean canHighlight(MappedFieldType fieldType) {


@ -46,6 +46,8 @@ import java.util.Objects;
/**
* Represents a {@link QueryBuilder} and a list of alias names that filters the builder is composed of.
*
* @opensearch.internal
*/
public final class AliasFilter implements Writeable, Rewriteable<AliasFilter> {


@ -42,6 +42,8 @@ import java.util.Objects;
/**
* A {@link BulkScorer} wrapper that runs a {@link Runnable} on a regular basis
* so that the query can be interrupted.
*
* @opensearch.internal
*/
final class CancellableBulkScorer extends BulkScorer {
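
To illustrate the wrapping idea described above (not the actual CancellableBulkScorer source): score documents in bounded chunks and run the cancellation check between chunks. The chunk size and the class name are assumptions for the example.

```java
import java.io.IOException;

import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.util.Bits;

// Hedged sketch of chunked scoring with periodic cancellation checks.
final class ChunkedCancellableBulkScorer extends BulkScorer {
    private static final int CHUNK = 4096;      // assumed chunk size
    private final BulkScorer in;
    private final Runnable checkCancelled;      // throws if the query was cancelled or timed out

    ChunkedCancellableBulkScorer(BulkScorer in, Runnable checkCancelled) {
        this.in = in;
        this.checkCancelled = checkCancelled;
    }

    @Override
    public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
        while (min < max) {
            checkCancelled.run();
            int upTo = (int) Math.min((long) min + CHUNK, max);
            min = in.score(collector, acceptDocs, min, upTo);
        }
        return min;
    }

    @Override
    public long cost() {
        return in.cost();
    }
}
```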


@ -84,6 +84,8 @@ import java.util.concurrent.Executor;
/**
* Context-aware extension of {@link IndexSearcher}.
*
* @opensearch.internal
*/
public class ContextIndexSearcher extends IndexSearcher implements Releasable {
/**


@ -51,6 +51,8 @@ import java.io.IOException;
/**
* Wraps an {@link IndexReader} with a {@link QueryCancellation}
* which checks for cancelled or timed-out query.
*
* @opensearch.internal
*/
class ExitableDirectoryReader extends FilterDirectoryReader {


@ -70,6 +70,11 @@ import org.opensearch.search.suggest.SuggestionSearchContext;
import java.util.List;
import java.util.Map;
/**
* Context used during a filtered search
*
* @opensearch.internal
*/
public abstract class FilteredSearchContext extends SearchContext {
private final SearchContext in;


@ -44,6 +44,11 @@ import org.opensearch.transport.TransportRequest;
import java.io.IOException;
import java.util.Map;
/**
* Internal request used during scroll search
*
* @opensearch.internal
*/
public class InternalScrollSearchRequest extends TransportRequest {
private ShardSearchContextId contextId;
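
For context on where internal scroll requests come from, a hedged client-side sketch; the index name, page size, and keep-alive are illustrative.

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchScrollRequest;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.search.builder.SearchSourceBuilder;

// Hedged sketch: the first request opens a scroll context; follow-up pages are
// fetched with the scroll id returned by the previous response.
public final class ScrollExample {
    public static SearchRequest firstPage() {
        return new SearchRequest("logs")
            .source(new SearchSourceBuilder().size(500))
            .scroll(TimeValue.timeValueMinutes(1));
    }

    public static SearchScrollRequest nextPage(String scrollId) {
        return new SearchScrollRequest(scrollId).scroll(TimeValue.timeValueMinutes(1));
    }
}
```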


@ -46,6 +46,8 @@ import java.io.IOException;
/**
* {@link SearchResponseSections} subclass that can be serialized over the wire.
*
* @opensearch.internal
*/
public class InternalSearchResponse extends SearchResponseSections implements Writeable, ToXContentFragment {
public static InternalSearchResponse empty() {


@ -40,6 +40,11 @@ import org.opensearch.search.dfs.AggregatedDfs;
import java.util.Objects;
/**
* Reader context that does not hold a reference to a point in time Searcher
*
* @opensearch.internal
*/
public class LegacyReaderContext extends ReaderContext {
private final ShardSearchRequest shardSearchRequest;
private final ScrollContext scrollContext;


@ -56,6 +56,8 @@ import java.util.concurrent.atomic.AtomicLong;
* in {@link org.opensearch.search.SearchService} a SearchContext can be closed concurrently due to independent events
* ie. when an index gets removed. To prevent accessing closed IndexReader / IndexSearcher instances the SearchContext
* can be guarded by a reference count and fail if it's been closed by an external event.
*
* @opensearch.internal
*/
public class ReaderContext implements Releasable {
private final ShardSearchContextId id;
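
A minimal sketch of the reference-counting guard described in the ReaderContext javadoc above. This is not the actual implementation; the names and the AutoCloseable shape are assumptions made to keep the example self-contained.

```java
import java.util.concurrent.atomic.AtomicInteger;

// Hedged sketch: callers take a reference before using the wrapped
// IndexReader/IndexSearcher and fail fast if the context was already closed.
final class RefCountedContext implements AutoCloseable {
    private final AtomicInteger refCount = new AtomicInteger(1); // 1 = the "open" reference

    boolean tryIncRef() {
        while (true) {
            int current = refCount.get();
            if (current <= 0) {
                return false;                       // closed by an external event
            }
            if (refCount.compareAndSet(current, current + 1)) {
                return true;                        // safe to use the resources
            }
        }
    }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            // last reference released after close(): free reader/searcher here
        }
    }

    @Override
    public void close() {
        decRef();                                   // drop the "open" reference
    }
}
```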


@ -36,7 +36,11 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TotalHits;
import org.opensearch.search.Scroll;
/** Wrapper around information that needs to stay around when scrolling. */
/**
* Wrapper around information that needs to stay around when scrolling.
*
* @opensearch.internal
*/
public final class ScrollContext {
public TotalHits totalHits = null;
public float maxScore = Float.NaN;


@ -83,6 +83,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
* This class encapsulates the state needed to execute a search. It holds a reference to the
* shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on
* state from one query / fetch phase to another.
*
* @opensearch.internal
*/
public abstract class SearchContext implements Releasable {


@ -40,6 +40,11 @@ import org.opensearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Objects;
/**
* Used to support Point in Time Searching
*
* @opensearch.internal
*/
public final class ShardSearchContextId implements Writeable {
private final String sessionId;
private final long id;


@ -81,6 +81,8 @@ import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISA
* Shard level request that represents a search.
* It provides all the methods that the {@link SearchContext} needs.
* Provides a cache key based on its content that can be used to cache shard level response.
*
* @opensearch.internal
*/
public class ShardSearchRequest extends TransportRequest implements IndicesRequest {
private final String clusterAlias;


@ -49,6 +49,11 @@ import org.opensearch.search.suggest.SuggestionSearchContext;
import java.util.List;
/**
* Context for a sub search phase
*
* @opensearch.internal
*/
public class SubSearchContext extends FilteredSearchContext {
// By default return 3 hits per bucket. A higher default would make the response really large by default, since


@ -38,6 +38,11 @@ import org.opensearch.index.mapper.MapperService;
import java.util.function.Function;
/**
* Looks up a document
*
* @opensearch.internal
*/
public class DocLookup {
private final MapperService mapperService;


@ -37,6 +37,11 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Looks up a field
*
* @opensearch.internal
*/
public class FieldLookup {
// we can cached fieldType completely per name, since its on an index/shard level (the lookup, and it does not change within the scope


@ -34,6 +34,11 @@ package org.opensearch.search.lookup;
import org.apache.lucene.index.LeafReaderContext;
import org.opensearch.index.mapper.MapperService;
/**
* Looks up multiple fields from a leaf reader
*
* @opensearch.internal
*/
public class FieldsLookup {
private final MapperService mapperService;


@ -47,6 +47,11 @@ import java.util.Map;
import java.util.Set;
import java.util.function.Function;
/**
* Looks up a doc from a leaf reader
*
* @opensearch.internal
*/
public class LeafDocLookup implements Map<String, ScriptDocValues<?>> {
private final Map<String, ScriptDocValues<?>> localCacheFieldData = new HashMap<>(4);


@ -47,6 +47,11 @@ import java.util.Set;
import static java.util.Collections.singletonMap;
/**
* Looks up multiple leaf fields
*
* @opensearch.internal
*/
public class LeafFieldsLookup implements Map {
private final MapperService mapperService;


@ -41,6 +41,8 @@ import static java.util.Collections.unmodifiableMap;
/**
* Per-segment version of {@link SearchLookup}.
*
* @opensearch.internal
*/
public class LeafSearchLookup {


@ -44,6 +44,11 @@ import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Supplier;
/**
* Orchestrator class for search phase lookups
*
* @opensearch.internal
*/
public class SearchLookup {
/**
* The maximum depth of field dependencies.


@ -54,6 +54,11 @@ import java.util.Set;
import static java.util.Collections.emptyMap;
/**
* Orchestrator class for source lookups
*
* @opensearch.internal
*/
public class SourceLookup implements Map {
private LeafReader reader;
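
One common consumer of these lookup classes is a script field: doc['...'] accesses go through the doc lookup classes and params._source through SourceLookup. A hedged request-side sketch, with index, field, and script made up for illustration.

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.script.Script;
import org.opensearch.search.builder.SearchSourceBuilder;

// Hedged sketch: a script field computed per hit from doc values.
public final class ScriptFieldExample {
    public static SearchRequest build() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchAllQuery())
            .scriptField("discounted_price", new Script("doc['price'].value * 0.9"));
        return new SearchRequest("products").source(source);
    }
}
```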


@ -40,6 +40,11 @@ import java.util.Collections;
import java.util.Deque;
import java.util.List;
/**
* Base class for a profiling tree.
*
* @opensearch.internal
*/
public abstract class AbstractInternalProfileTree<PB extends AbstractProfileBreakdown<?>, E> {
protected ArrayList<PB> breakdowns;


@ -42,6 +42,8 @@ import static java.util.Collections.emptyMap;
* A record of timings for the various operations that may happen during query execution.
* A node's time may be composed of several internal attributes (rewriting, weighting,
* scoring, etc).
*
* @opensearch.internal
*/
public abstract class AbstractProfileBreakdown<T extends Enum<T>> {


@ -34,6 +34,11 @@ package org.opensearch.search.profile;
import java.util.List;
/**
* Base class for a profiler
*
* @opensearch.internal
*/
public class AbstractProfiler<PB extends AbstractProfileBreakdown<?>, E> {
protected final AbstractInternalProfileTree<PB, E> profileTree;


@ -11,6 +11,8 @@ package org.opensearch.search.profile;
/**
* Provide contextual profile breakdowns which are associated with freestyle context. Used when concurrent
* search over segments is activated and each collector needs own non-shareable profile breakdown instance.
*
* @opensearch.internal
*/
public abstract class ContextualProfileBreakdown<T extends Enum<T>> extends AbstractProfileBreakdown<T> {
public ContextualProfileBreakdown(Class<T> clazz) {


@ -15,6 +15,11 @@ import org.opensearch.common.io.stream.Writeable;
import java.io.IOException;
/**
* Utility class to track time of network operations
*
* @opensearch.internal
*/
public class NetworkTime implements Writeable {
private long inboundNetworkTime;
private long outboundNetworkTime;


@ -61,6 +61,8 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalCo
* <p>
* Each InternalProfileResult has a List of InternalProfileResults, which will contain
* "children" queries if applicable
*
* @opensearch.internal
*/
public final class ProfileResult implements Writeable, ToXContentObject {
static final ParseField TYPE = new ParseField("type");


@ -43,6 +43,11 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Shard level profile results
*
* @opensearch.internal
*/
public class ProfileShardResult implements Writeable {
private final List<QueryProfileShardResult> queryProfileResults;


@ -40,7 +40,11 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/** Wrapper around all the profilers that makes management easier. */
/**
* Wrapper around all the profilers that makes management easier.
*
* @opensearch.internal
*/
public final class Profilers {
private final ContextIndexSearcher searcher;


@ -57,6 +57,8 @@ import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedT
/**
* A container class to hold all the profile results across all shards. Internally
* holds a map of shard ID -&gt; Profiled results
*
* @opensearch.internal
*/
public final class SearchProfileShardResults implements Writeable, ToXContentFragment {
private static final String SEARCHES_FIELD = "searches";


@ -45,6 +45,8 @@ package org.opensearch.search.profile;
* timer.stop();
* }
* </pre>
*
* @opensearch.internal
*/
public class Timer {


@ -41,6 +41,8 @@ import static java.util.Collections.unmodifiableMap;
/**
* {@linkplain AbstractProfileBreakdown} customized to work with aggregations.
*
* @opensearch.internal
*/
public class AggregationProfileBreakdown extends AbstractProfileBreakdown<AggregationTimingType> {
private final Map<String, Object> extra = new HashMap<>();


@ -50,6 +50,8 @@ import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedT
/**
* A container class to hold the profile results for a single shard in the request.
* Contains a list of query profiles, a collector tree and a total rewrite tree.
*
* @opensearch.internal
*/
public final class AggregationProfileShardResult implements Writeable, ToXContentFragment {


@ -40,6 +40,11 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
* Main class to profile aggregations
*
* @opensearch.internal
*/
public class AggregationProfiler extends AbstractProfiler<AggregationProfileBreakdown, Aggregator> {
private final Map<List<String>, AggregationProfileBreakdown> profileBreakdownLookup = new HashMap<>();


@ -34,6 +34,11 @@ package org.opensearch.search.profile.aggregation;
import java.util.Locale;
/**
* Timing levels for aggregations
*
* @opensearch.internal
*/
public enum AggregationTimingType {
INITIALIZE,
BUILD_LEAF_COLLECTOR,


@ -35,6 +35,11 @@ package org.opensearch.search.profile.aggregation;
import org.opensearch.search.aggregations.Aggregator;
import org.opensearch.search.profile.AbstractInternalProfileTree;
/**
* The profiling tree for different levels of agg profiling
*
* @opensearch.internal
*/
public class InternalAggregationProfileTree extends AbstractInternalProfileTree<AggregationProfileBreakdown, Aggregator> {
@Override


@ -45,6 +45,9 @@ import org.opensearch.search.sort.SortOrder;
import java.io.IOException;
import java.util.Iterator;
/**
* An aggregator that wraps another aggregator in order to profile its performance
*/
public class ProfilingAggregator extends Aggregator {
private final Aggregator delegate;


@ -38,6 +38,11 @@ import org.opensearch.search.profile.Timer;
import java.io.IOException;
/**
* The collector for the agg profiles
*
* @opensearch.internal
*/
public class ProfilingLeafBucketCollector extends LeafBucketCollector {
private LeafBucketCollector delegate;


@ -53,6 +53,8 @@ import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedT
* Public interface and serialization container for profiled timings of the
* Collectors used in the search. Children CollectorResult's may be
* embedded inside of a parent CollectorResult
*
* @opensearch.internal
*/
public class CollectorResult implements ToXContentObject, Writeable {


@ -19,6 +19,8 @@ import java.util.concurrent.ConcurrentHashMap;
* A record of timings for the various operations that may happen during query execution.
* A node's time may be composed of several internal attributes (rewriting, weighting,
* scoring, etc). The class supports profiling the concurrent search over segments.
*
* @opensearch.internal
*/
public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown<QueryTimingType> {
private final Map<Object, AbstractProfileBreakdown<QueryTimingType>> contexts = new ConcurrentHashMap<>();


@ -50,6 +50,8 @@ import java.util.List;
* - needsScores()
*
* InternalProfiler facilitates the linking of the Collector graph
*
* @opensearch.internal
*/
public class InternalProfileCollector implements Collector, InternalProfileComponent {


@ -18,6 +18,11 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* Collector manager for internal query profiling
*
* @opensearch.internal
*/
public class InternalProfileCollectorManager
implements
ProfileCollectorManager<InternalProfileCollector, ReduceableSearchResult>,


@ -10,6 +10,11 @@ package org.opensearch.search.profile.query;
import java.util.Collection;
/**
* Container for a query profiling component
*
* @opensearch.internal
*/
public interface InternalProfileComponent {
/**
* @return profile component name


@ -41,6 +41,8 @@ import org.opensearch.search.profile.ProfileResult;
* This class tracks the dependency tree for queries (scoring and rewriting) and
* generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree
* and returns a list of {@link ProfileResult} that can be serialized back to the client
*
* @opensearch.internal
*/
final class InternalQueryProfileTree extends AbstractInternalProfileTree<ContextualProfileBreakdown<QueryTimingType>, Query> {


@ -42,7 +42,11 @@ import org.apache.lucene.search.ScoreMode;
import java.io.IOException;
/** A collector that profiles how much time is spent calling it. */
/**
* A collector that profiles how much time is spent calling it.
*
* @opensearch.internal
*/
final class ProfileCollector extends FilterCollector {
private long time;


@ -13,5 +13,7 @@ import org.apache.lucene.search.CollectorManager;
/**
* Collector manager which supports profiling
*
* @opensearch.internal
*/
public interface ProfileCollectorManager<C extends Collector, T> extends CollectorManager<C, T>, InternalProfileComponent {}


@ -45,6 +45,8 @@ import java.util.Collection;
/**
* {@link Scorer} wrapper that will compute how much time is spent on moving
* the iterator, confirming matches and computing scores.
*
* @opensearch.internal
*/
final class ProfileScorer extends Scorer {


@ -48,6 +48,8 @@ import java.io.IOException;
* Weight wrapper that will compute how much time it takes to build the
* {@link Scorer} and then return a {@link Scorer} that is wrapped in
* order to compute timings as well.
*
* @opensearch.internal
*/
public final class ProfileWeight extends Weight {


@ -39,6 +39,8 @@ import org.opensearch.search.profile.ContextualProfileBreakdown;
* A record of timings for the various operations that may happen during query execution.
* A node's time may be composed of several internal attributes (rewriting, weighting,
* scoring, etc).
*
* @opensearch.internal
*/
public final class QueryProfileBreakdown extends ContextualProfileBreakdown<QueryTimingType> {


@ -50,6 +50,8 @@ import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedT
/**
* A container class to hold the profile results for a single shard in the request.
* Contains a list of query profiles, a collector tree and a total rewrite tree.
*
* @opensearch.internal
*/
public final class QueryProfileShardResult implements Writeable, ToXContentObject {


@ -48,6 +48,8 @@ import java.util.Objects;
* A Profiler is associated with every Search, not per Search-Request. E.g. a
* request may execute two searches (query + global agg). A Profiler just
* represents one of those
*
* @opensearch.internal
*/
public final class QueryProfiler extends AbstractProfiler<ContextualProfileBreakdown<QueryTimingType>, Query> {
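
A hedged sketch of how profiling is switched on from the request side, producing the QueryProfiler and AggregationProfiler output described in this package; the index, field, and value are illustrative.

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

// Hedged sketch: profile(true) asks each shard to record the timing breakdowns
// that the profile classes above aggregate and serialize back to the client.
public final class ProfiledSearchExample {
    public static SearchRequest build() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.termQuery("status", "published"))
            .profile(true);
        return new SearchRequest("articles").source(source);
    }
}
```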

Some files were not shown because too many files have changed in this diff.