[Javadocs] add to internal classes in o.o.http, indices, and search (#3288)

Adds javadocs to internal classes in org.opensearch.http, indices, and search
packages.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize 2022-05-11 12:52:28 -05:00, committed by GitHub
parent b78176afef
commit 86f8dd65a4
200 changed files with 1521 additions and 18 deletions
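
For context, every hunk below applies the same pattern: a short class-level Javadoc plus the @opensearch.internal tag on an internal or nested class. A minimal sketch of the shape being added (the class name here is hypothetical, for illustration only):

    /**
     * Short one-line description of what the internal class does.
     *
     * @opensearch.internal
     */
    final class ExampleInternalHelper {
        // internal implementation detail, not part of the public API surface
        private ExampleInternalHelper() {}
    }

The tag marks the class as internal rather than public API, matching the commit message above.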

View File

@ -56,6 +56,11 @@ public class Build {
*/
public static final Build CURRENT;
/**
* The type of build
*
* @opensearch.internal
*/
public enum Type {
DEB("deb"),

View File

@ -261,6 +261,11 @@ public class CorsHandler {
response.addHeader(ACCESS_CONTROL_MAX_AGE, Long.toString(config.maxAge));
}
/**
* The CORS handler configuration
*
* @opensearch.internal
*/
public static class Config {
private final boolean enabled;

View File

@ -49,6 +49,11 @@ import java.util.Map;
*/
public interface HttpRequest {
/**
* Which HTTP version is being used
*
* @opensearch.internal
*/
enum HttpVersion {
HTTP_1_0,
HTTP_1_1

View File

@ -260,6 +260,11 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos
}
}
/**
* The bytes used by a shard and a reference to the shard
*
* @opensearch.internal
*/
private static final class ShardAndBytesUsed implements Comparable<ShardAndBytesUsed> {
final long bytesUsed;
final IndexShard shard;

View File

@ -216,6 +216,11 @@ public class IndicesQueryCache implements QueryCache, Closeable {
cache.clear();
}
/**
* Statistics for the indices query cache
*
* @opensearch.internal
*/
private static class Stats implements Cloneable {
final ShardId shardId;
@ -251,6 +256,11 @@ public class IndicesQueryCache implements QueryCache, Closeable {
}
}
/**
* Statistics and Counts
*
* @opensearch.internal
*/
private static class StatsAndCount {
volatile int count;
final Stats stats;

View File

@ -176,6 +176,11 @@ public final class IndicesRequestCache implements RemovalListener<IndicesRequest
cache.invalidate(new Key(cacheEntity, reader.getReaderCacheHelper().getKey(), cacheKey));
}
/**
* Loader for the request cache
*
* @opensearch.internal
*/
private static class Loader implements CacheLoader<Key, BytesReference> {
private final CacheEntity entity;
@ -238,6 +243,11 @@ public final class IndicesRequestCache implements RemovalListener<IndicesRequest
void onRemoval(RemovalNotification<Key, BytesReference> notification);
}
/**
* Unique key for the cache
*
* @opensearch.internal
*/
static class Key implements Accountable {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class);

View File

@ -913,6 +913,11 @@ public class IndicesService extends AbstractLifecycleComponent
return indicesQueryCache;
}
/**
* Statistics for old shards
*
* @opensearch.internal
*/
static class OldShardsStats implements IndexEventListener {
final SearchStats searchStats = new SearchStats();
@ -1236,6 +1241,11 @@ public class IndicesService extends AbstractLifecycleComponent
}
}
/**
* A pending delete
*
* @opensearch.internal
*/
private static final class PendingDelete implements Comparable<PendingDelete> {
final Index index;
final int shardId;
@ -1386,6 +1396,8 @@ public class IndicesService extends AbstractLifecycleComponent
* periodically. In this case it is the field data cache, because a cache that
* has an entry invalidated may not clean up the entry if it is not read from
* or written to after invalidation.
*
* @opensearch.internal
*/
private static final class CacheCleaner implements Runnable, Releasable {
@ -1574,6 +1586,11 @@ public class IndicesService extends AbstractLifecycleComponent
return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey);
}
/**
* An item in the index shard cache
*
* @opensearch.internal
*/
static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class);
private final IndexShard indexShard;

View File

@ -271,6 +271,11 @@ public class NodeIndicesStats implements Writeable, ToXContentFragment {
}
}
/**
* Fields used for parsing and toXContent
*
* @opensearch.internal
*/
static final class Fields {
static final String INDICES = "indices";
}

View File

@ -60,6 +60,11 @@ public class PreBuiltCacheFactory {
OPENSEARCH
}
/**
* The prebuilt cache
*
* @opensearch.internal
*/
public interface PreBuiltCache<T> {
T get(Version version);
@ -86,6 +91,8 @@ public class PreBuiltCacheFactory {
/**
* This is a pretty simple cache; it only contains one version
*
* @opensearch.internal
*/
private static class PreBuiltCacheStrategyOne<T> implements PreBuiltCache<T> {
@ -109,6 +116,8 @@ public class PreBuiltCacheFactory {
/**
* This cache contains one version for each OpenSearch version object
*
* @opensearch.internal
*/
private static class PreBuiltCacheStrategyOpenSearch<T> implements PreBuiltCache<T> {
@ -132,6 +141,8 @@ public class PreBuiltCacheFactory {
/**
* This cache uses the Lucene version for caching
*
* @opensearch.internal
*/
private static class PreBuiltCacheStrategyLucene<T> implements PreBuiltCache<T> {

View File

@ -87,6 +87,11 @@ public class AllCircuitBreakerStats implements Writeable, ToXContentFragment {
return builder;
}
/**
* Fields used for parsing and toXContent
*
* @opensearch.internal
*/
static final class Fields {
static final String BREAKERS = "breakers";
}

View File

@ -132,6 +132,11 @@ public class CircuitBreakerStats implements Writeable, ToXContentObject {
+ "]";
}
/**
* Fields used for statistics
*
* @opensearch.internal
*/
static final class Fields {
static final String LIMIT = "limit_size_in_bytes";
static final String LIMIT_HUMAN = "limit_size";

View File

@ -341,6 +341,11 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
);
}
/**
* Tracks memory usage
*
* @opensearch.internal
*/
static class MemoryUsage {
final long baseUsage;
final long totalUsage;
@ -505,6 +510,11 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
MemoryUsage overLimit(MemoryUsage memoryUsed);
}
/**
* Triggers G1 GC when heap usage gets too high
*
* @opensearch.internal
*/
static class G1OverLimitStrategy implements OverLimitStrategy {
private final long g1RegionSize;
private final LongSupplier currentMemoryUsageSupplier;

View File

@ -846,6 +846,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
}
}
/**
* A shard
*
* @opensearch.internal
*/
public interface Shard {
/**
@ -894,6 +899,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
) throws IOException;
}
/**
* An allocated index
*
* @opensearch.internal
*/
public interface AllocatedIndex<T extends Shard> extends Iterable<T>, IndexComponent {
/**
@ -926,6 +936,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
void removeShard(int shardId, String message);
}
/**
* Allocated indices
*
* @opensearch.internal
*/
public interface AllocatedIndices<T extends Shard, U extends AllocatedIndex<T>> extends Iterable<U> {
/**
@ -1012,6 +1027,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException, InterruptedException,
ShardLockObtainFailedException;
/**
* Why the index was removed
*
* @opensearch.internal
*/
enum IndexRemovalReason {
/**
* Shard of this index were previously assigned to this node but all shards have been relocated.

View File

@ -125,6 +125,11 @@ public class IndicesFieldDataCache implements RemovalListener<IndicesFieldDataCa
}
}
/**
* Computes a weight based on ramBytesUsed
*
* @opensearch.internal
*/
public static class FieldDataWeigher implements ToLongBiFunction<Key, Accountable> {
@Override
public long applyAsLong(Key key, Accountable ramUsage) {
@ -135,6 +140,8 @@ public class IndicesFieldDataCache implements RemovalListener<IndicesFieldDataCa
/**
* A specific cache instance for the relevant parameters of it (index, fieldNames, fieldType).
*
* @opensearch.internal
*/
static class IndexFieldCache implements IndexFieldDataCache, IndexReader.ClosedListener {
private final Logger logger;
@ -242,6 +249,11 @@ public class IndicesFieldDataCache implements RemovalListener<IndicesFieldDataCa
}
}
/**
* Key for the indices field data cache
*
* @opensearch.internal
*/
public static class Key {
public final IndexFieldCache indexCache;
public final IndexReader.CacheKey readerKey;

View File

@ -211,6 +211,11 @@ public abstract class MultiChunkTransfer<Source, Request extends MultiChunkTrans
protected abstract void handleError(Source resource, Exception e) throws Exception;
/**
* A file chunk item as the response
*
* @opensearch.internal
*/
private static class FileChunkResponseItem<Source> {
final long requestSeqId;
final Source source;
@ -223,6 +228,11 @@ public abstract class MultiChunkTransfer<Source, Request extends MultiChunkTrans
}
}
/**
* A chunk request
*
* @opensearch.internal
*/
public interface ChunkRequest {
/**
* @return {@code true} if this chunk request is the last chunk of the current file

View File

@ -199,6 +199,11 @@ public class MultiFileWriter extends AbstractRefCounted implements Releasable {
store.renameTempFilesSafe(tempFileNames);
}
/**
* A file chunk
*
* @opensearch.internal
*/
static final class FileChunk {
final StoreFileMetadata md;
final BytesReference content;

View File

@ -79,6 +79,11 @@ public class PeerRecoverySourceService extends AbstractLifecycleComponent implem
private static final Logger logger = LogManager.getLogger(PeerRecoverySourceService.class);
/**
* The internal actions
*
* @opensearch.internal
*/
public static class Actions {
public static final String START_RECOVERY = "internal:index/shard/recovery/start_recovery";
public static final String REESTABLISH_RECOVERY = "internal:index/shard/recovery/reestablish_recovery";

View File

@ -102,6 +102,11 @@ public class PeerRecoveryTargetService implements IndexEventListener {
private static final Logger logger = LogManager.getLogger(PeerRecoveryTargetService.class);
/**
* The internal actions
*
* @opensearch.internal
*/
public static class Actions {
public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo";
public static final String FILE_CHUNK = "internal:index/shard/recovery/file_chunk";
@ -340,6 +345,11 @@ public class PeerRecoveryTargetService implements IndexEventListener {
return request;
}
/**
* The recovery listener
*
* @opensearch.internal
*/
public interface RecoveryListener {
void onRecoveryDone(RecoveryState state);

View File

@ -274,6 +274,8 @@ public class RecoveriesCollection {
* a reference to {@link RecoveryTarget}, which implements {@link AutoCloseable}. closing the reference
* causes {@link RecoveryTarget#decRef()} to be called. This makes sure that the underlying resources
* will not be freed until {@link RecoveryRef#close()} is called.
*
* @opensearch.internal
*/
public static class RecoveryRef extends AutoCloseableRefCounted<RecoveryTarget> {
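
The RecoveryRef javadoc above describes an AutoCloseable reference whose close() delegates to RecoveryTarget#decRef(), so the underlying resources stay alive for as long as the reference is held. A minimal, generic sketch of that idiom (class and method names below are hypothetical placeholders, not the OpenSearch API):

    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical ref-counted resource: freed only when the count reaches zero.
    class RefCountedTarget {
        private final AtomicInteger refCount = new AtomicInteger(1);

        void incRef() { refCount.incrementAndGet(); }

        void decRef() {
            if (refCount.decrementAndGet() == 0) {
                // release files, channels, and other underlying resources here
            }
        }
    }

    // Hypothetical stand-in for RecoveryRef: closing the reference releases one count.
    class TargetRef implements AutoCloseable {
        private final RefCountedTarget target;

        TargetRef(RefCountedTarget target) {
            this.target = target;
            target.incRef(); // hold the target while this reference exists
        }

        @Override
        public void close() {
            target.decRef(); // mirrors RecoveryRef#close() calling RecoveryTarget#decRef()
        }
    }

Callers would typically hold such a reference in a try-with-resources block so the decRef() happens even if an exception is thrown.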

View File

@ -498,6 +498,11 @@ public class RecoverySourceHandler {
FutureUtils.get(future);
}
/**
* A send file result
*
* @opensearch.internal
*/
static final class SendFileResult {
final List<String> phase1FileNames;
final List<Long> phase1FileSizes;
@ -853,6 +858,11 @@ public class RecoverySourceHandler {
sender.start();
}
/**
* An operation chunk request
*
* @opensearch.internal
*/
private static class OperationChunkRequest implements MultiChunkTransfer.ChunkRequest {
final List<Translog.Operation> operations;
final boolean lastChunk;
@ -1009,6 +1019,11 @@ public class RecoverySourceHandler {
}, listener::onFailure);
}
/**
* A result for a send snapshot
*
* @opensearch.internal
*/
static final class SendSnapshotResult {
final long targetLocalCheckpoint;
final int sentOperations;
@ -1041,6 +1056,11 @@ public class RecoverySourceHandler {
+ '}';
}
/**
* A file chunk from the recovery source
*
* @opensearch.internal
*/
private static class FileChunk implements MultiChunkTransfer.ChunkRequest, Releasable {
final StoreFileMetadata md;
final BytesReference content;

View File

@ -58,6 +58,11 @@ import java.util.Locale;
*/
public class RecoveryState implements ToXContentFragment, Writeable {
/**
* The stage of the recovery state
*
* @opensearch.internal
*/
public enum Stage {
INIT((byte) 0),
@ -328,6 +333,11 @@ public class RecoveryState implements ToXContentFragment, Writeable {
return builder;
}
/**
* Fields used in the recovery state
*
* @opensearch.internal
*/
static final class Fields {
static final String ID = "id";
static final String TYPE = "type";
@ -356,6 +366,11 @@ public class RecoveryState implements ToXContentFragment, Writeable {
static final String PERCENT = "percent";
}
/**
* Verifies the Lucene index
*
* @opensearch.internal
*/
public static class VerifyIndex extends ReplicationTimer implements ToXContentFragment, Writeable {
private volatile long checkIndexTime;
@ -393,6 +408,11 @@ public class RecoveryState implements ToXContentFragment, Writeable {
}
}
/**
* The translog
*
* @opensearch.internal
*/
public static class Translog extends ReplicationTimer implements ToXContentFragment, Writeable {
public static final int UNKNOWN = -1;

View File

@ -307,6 +307,11 @@ public final class ReplicationLuceneIndex extends ReplicationTimer implements To
return filesDetails.get(dest);
}
/**
* Details about the files
*
* @opensearch.internal
*/
private static final class FilesDetails implements ToXContentFragment, Writeable {
protected final Map<String, FileMetadata> fileMetadataMap = new HashMap<>();
protected boolean complete;
@ -397,6 +402,11 @@ public final class ReplicationLuceneIndex extends ReplicationTimer implements To
}
}
/**
* Metadata about a file
*
* @opensearch.internal
*/
public static final class FileMetadata implements ToXContentObject, Writeable {
private String name;
private long length;
@ -500,6 +510,8 @@ public final class ReplicationLuceneIndex extends ReplicationTimer implements To
/**
* Duplicates many of the field names in {@link RecoveryState}
*
* @opensearch.internal
*/
static final class Fields {
static final String TOTAL_TIME = "total_time";

View File

@ -452,6 +452,11 @@ public class IndicesStore implements ClusterStateListener, Closeable {
}
/**
* A shard active request
*
* @opensearch.internal
*/
private static class ShardActiveRequest extends TransportRequest {
protected TimeValue timeout = null;
private ClusterName clusterName;
@ -483,6 +488,11 @@ public class IndicesStore implements ClusterStateListener, Closeable {
}
}
/**
* The shard active response
*
* @opensearch.internal
*/
private static class ShardActiveResponse extends TransportResponse {
private final boolean shardActive;

View File

@ -77,6 +77,11 @@ import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
* Metadata for shard stores from a list of transport nodes
*
* @opensearch.internal
*/
public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
TransportNodesListShardStoreMetadata.Request,
TransportNodesListShardStoreMetadata.NodesStoreFilesMetadata,
@ -225,6 +230,11 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
}
}
/**
* Metadata for store files
*
* @opensearch.internal
*/
public static class StoreFilesMetadata implements Iterable<StoreFileMetadata>, Writeable {
private final ShardId shardId;
private final Store.MetadataSnapshot metadataSnapshot;
@ -318,6 +328,11 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
}
}
/**
* The request
*
* @opensearch.internal
*/
public static class Request extends BaseNodesRequest<Request> {
private final ShardId shardId;
@ -364,6 +379,11 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
}
}
/**
* Metadata for the nodes store files
*
* @opensearch.internal
*/
public static class NodesStoreFilesMetadata extends BaseNodesResponse<NodeStoreFilesMetadata> {
public NodesStoreFilesMetadata(StreamInput in) throws IOException {
@ -385,6 +405,11 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
}
}
/**
* The node request
*
* @opensearch.internal
*/
public static class NodeRequest extends BaseNodeRequest {
private final ShardId shardId;
@ -431,6 +456,11 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
}
}
/**
* The metadata for the node store files
*
* @opensearch.internal
*/
public static class NodeStoreFilesMetadata extends BaseNodeResponse {
private StoreFilesMetadata storeFilesMetadata;

View File

@ -201,6 +201,11 @@ public interface DocValueFormat extends NamedWriteable {
}
}
/**
* Date time doc value format
*
* @opensearch.internal
*/
final class DateTime implements DocValueFormat {
public static final String NAME = "date_time";
@ -406,6 +411,11 @@ public interface DocValueFormat extends NamedWriteable {
}
};
/**
* Decimal doc value format
*
* @opensearch.internal
*/
final class Decimal implements DocValueFormat {
public static final String NAME = "decimal";

View File

@ -590,6 +590,11 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do
this.innerHits = innerHits;
}
/**
* Fields in a search hit used for parsing and toXContent
*
* @opensearch.internal
*/
public static class Fields {
static final String _INDEX = "_index";
static final String _ID = "_id";
@ -1000,6 +1005,8 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do
/**
* Encapsulates the nested identity of a hit.
*
* @opensearch.internal
*/
public static final class NestedIdentity implements Writeable, ToXContentFragment {

View File

@ -201,6 +201,11 @@ public final class SearchHits implements Writeable, ToXContentFragment, Iterable
return Arrays.stream(getHits()).iterator();
}
/**
* Fields for parsing and toXContent
*
* @opensearch.internal
*/
public static final class Fields {
public static final String HITS = "hits";
public static final String TOTAL = "total";

View File

@ -1418,6 +1418,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
return request.source().aggregations().buildPipelineTree();
}
/**
* Search phase result indicating whether a shard can match the query
*
* @opensearch.internal
*/
public static final class CanMatchResponse extends SearchPhaseResult {
private final boolean canMatch;
private final MinAndMax<?> estimatedMinAndMax;

View File

@ -166,6 +166,8 @@ public abstract class AggregationBuilder
* <p>
* Unlike {@link CardinalityUpperBound} which is <strong>total</strong>
* instead of <strong>per parent bucket</strong>.
*
* @opensearch.internal
*/
public enum BucketCardinality {
NONE,
@ -179,7 +181,11 @@ public abstract class AggregationBuilder
*/
public abstract BucketCardinality bucketCardinality();
/** Common xcontent fields shared among aggregator builders */
/**
* Common xcontent fields shared among aggregator builders
*
* @opensearch.internal
*/
public static final class CommonFields extends ParseField.CommonFields {
public static final ParseField VALUE_TYPE = new ParseField("value_type");
}

View File

@ -63,6 +63,8 @@ public abstract class Aggregator extends BucketCollector implements Releasable {
* Parses the aggregation request and creates the appropriate aggregator factory for it.
*
* @see AggregationBuilder
*
* @opensearch.internal
*/
@FunctionalInterface
public interface Parser {
@ -157,6 +159,8 @@ public abstract class Aggregator extends BucketCollector implements Releasable {
/**
* Compare two buckets by their ordinal.
*
* @opensearch.internal
*/
@FunctionalInterface
public interface BucketComparator {
@ -204,7 +208,11 @@ public abstract class Aggregator extends BucketCollector implements Releasable {
*/
public void collectDebugInfo(BiConsumer<String, Object> add) {}
/** Aggregation mode for sub aggregations. */
/**
* Aggregation mode for sub aggregations.
*
* @opensearch.internal
*/
public enum SubAggCollectionMode implements Writeable {
/**

View File

@ -295,6 +295,8 @@ public class AggregatorFactories {
/**
* A mutable collection of {@link AggregationBuilder}s and
* {@link PipelineAggregationBuilder}s.
*
* @opensearch.internal
*/
public static class Builder implements Writeable, ToXContentObject {
private final Set<String> names = new HashSet<>();

View File

@ -113,6 +113,8 @@ public abstract class CardinalityUpperBound {
/**
* Cardinality estimate with a known upper bound.
*
* @opensearch.internal
*/
private static class KnownCardinalityUpperBound extends CardinalityUpperBound {
private final int estimate;

View File

@ -64,6 +64,8 @@ import static java.util.Objects.requireNonNull;
public abstract class InternalAggregation implements Aggregation, NamedWriteable {
/**
* Builds {@link ReduceContext}.
*
* @opensearch.internal
*/
public interface ReduceContextBuilder {
/**
@ -77,6 +79,11 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable
ReduceContext forFinalReduction();
}
/**
* The reduce context
*
* @opensearch.internal
*/
public static class ReduceContext {
private final BigArrays bigArrays;
private final ScriptService scriptService;

View File

@ -304,6 +304,11 @@ public final class InternalAggregations extends Aggregations implements Writeabl
}
}
/**
* A counting stream output
*
* @opensearch.internal
*/
private static class CountingStreamOutput extends StreamOutput {
long size = 0;

View File

@ -223,6 +223,11 @@ public abstract class InternalMultiBucketAggregation<
return reducedBuckets;
}
/**
* An internal bucket for the internal multi-bucket agg
*
* @opensearch.internal
*/
public abstract static class InternalBucket implements Bucket, Writeable {
public Object getProperty(String containingAggName, List<String> path) {

View File

@ -65,6 +65,8 @@ public abstract class InternalOrder extends BucketOrder {
// TODO merge the contents of this file into BucketOrder. The way it is now is relic.
/**
* {@link Bucket} ordering strategy to sort by a sub-aggregation.
*
* @opensearch.internal
*/
public static class Aggregation extends InternalOrder {
static final byte ID = 0;
@ -133,6 +135,8 @@ public abstract class InternalOrder extends BucketOrder {
/**
* {@link Bucket} ordering strategy to sort by multiple criteria.
*
* @opensearch.internal
*/
public static class CompoundOrder extends BucketOrder {
@ -244,6 +248,8 @@ public abstract class InternalOrder extends BucketOrder {
* {@link BucketOrder} implementation for simple, fixed orders like
* {@link InternalOrder#COUNT_ASC}. Complex implementations should not
* use this.
*
* @opensearch.internal
*/
private static class SimpleOrder extends InternalOrder {
private final byte id;
@ -405,6 +411,8 @@ public abstract class InternalOrder extends BucketOrder {
/**
* Contains logic for reading/writing {@link BucketOrder} from/to streams.
*
* @opensearch.internal
*/
public static class Streams {
@ -493,6 +501,8 @@ public abstract class InternalOrder extends BucketOrder {
/**
* Contains logic for parsing a {@link BucketOrder} from a {@link XContentParser}.
*
* @opensearch.internal
*/
public static class Parser {

View File

@ -176,6 +176,11 @@ public class MultiBucketCollector extends BucketCollector {
}
}
/**
* A multi leaf bucket collector
*
* @opensearch.internal
*/
private static class MultiLeafBucketCollector extends LeafBucketCollector {
private final boolean cacheScores;

View File

@ -76,6 +76,11 @@ public class MultiBucketConsumerService {
this.maxBucket = maxBucket;
}
/**
* Thrown when there are too many buckets
*
* @opensearch.internal
*/
public static class TooManyBucketsException extends AggregationExecutionException {
private final int maxBuckets;
@ -115,6 +120,8 @@ public class MultiBucketConsumerService {
* when the sum of the provided values is above the limit (`search.max_buckets`).
* It is used by aggregators to limit the number of bucket creation during
* {@link Aggregator#buildAggregations} and {@link InternalAggregation#reduce}.
*
* @opensearch.internal
*/
public static class MultiBucketConsumer implements IntConsumer {
private final int limit;
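
The MultiBucketConsumer javadoc above describes an IntConsumer that sums the bucket counts reported by aggregators and fails once the total crosses the `search.max_buckets` limit. A rough sketch of that shape (the class name and exception type below are placeholders, not the actual OpenSearch implementation):

    import java.util.function.IntConsumer;

    // Hypothetical limiter: accumulates reported bucket counts and fails past a limit.
    class BucketCountLimiter implements IntConsumer {
        private final int limit;
        private int count;

        BucketCountLimiter(int limit) {
            this.limit = limit;
        }

        @Override
        public void accept(int newBuckets) {
            count += newBuckets;
            if (count > limit) {
                throw new IllegalStateException(
                    "trying to create too many buckets: " + count + " > limit " + limit
                );
            }
        }
    }

Calling accept() each time buckets are added keeps the check incremental, which is the behavior the javadoc describes for buildAggregations and reduce.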

View File

@ -99,6 +99,11 @@ public abstract class ParsedMultiBucketAggregation<B extends ParsedMultiBucketAg
}, CommonFields.BUCKETS, ObjectParser.ValueType.OBJECT_ARRAY);
}
/**
* A parsed bucket
*
* @opensearch.internal
*/
public abstract static class ParsedBucket implements MultiBucketsAggregation.Bucket {
private Aggregations aggregations;

View File

@ -97,6 +97,11 @@ public abstract class PipelineAggregationBuilder
*/
protected abstract void validate(ValidationContext context);
/**
* The context used for validation
*
* @opensearch.internal
*/
public abstract static class ValidationContext {
/**
* Build the context for the root of the aggregation tree.
@ -122,6 +127,11 @@ public abstract class PipelineAggregationBuilder
this.e = validationFailuresSoFar;
}
/**
* The root of the tree
*
* @opensearch.internal
*/
private static class ForTreeRoot extends ValidationContext {
private final Collection<AggregationBuilder> siblingAggregations;
private final Collection<PipelineAggregationBuilder> siblingPipelineAggregations;
@ -162,6 +172,11 @@ public abstract class PipelineAggregationBuilder
}
}
/**
* The internal tree node
*
* @opensearch.internal
*/
private static class ForInsideTree extends ValidationContext {
private final AggregationBuilder parent;

View File

@ -65,6 +65,11 @@ import java.util.Objects;
* @opensearch.internal
*/
public class BestBucketsDeferringCollector extends DeferringBucketCollector {
/**
* Entry in the bucket collector
*
* @opensearch.internal
*/
static class Entry {
final LeafReaderContext context;
final PackedLongValues docDeltas;

View File

@ -330,6 +330,11 @@ public abstract class BucketsAggregator extends AggregatorBase {
return results;
}
/**
* A bucket builder for a fixed count
*
* @opensearch.internal
*/
@FunctionalInterface
protected interface BucketBuilderForFixedCount<B> {
B build(int offsetInOwningOrd, int docCount, InternalAggregations subAggregationResults);
@ -355,6 +360,11 @@ public abstract class BucketsAggregator extends AggregatorBase {
return results;
}
/**
* A single bucket result builder
*
* @opensearch.internal
*/
@FunctionalInterface
protected interface SingleBucketResultBuilder {
InternalAggregation build(long owningBucketOrd, InternalAggregations subAggregationResults);
@ -415,11 +425,21 @@ public abstract class BucketsAggregator extends AggregatorBase {
return results;
}
/**
* A bucket builder for a variable
*
* @opensearch.internal
*/
@FunctionalInterface
protected interface BucketBuilderForVariable<B> {
B build(long bucketValue, int docCount, InternalAggregations subAggregationResults);
}
/**
* A result builder for a bucket variable
*
* @opensearch.internal
*/
@FunctionalInterface
protected interface ResultBuilderForVariable<B> {
InternalAggregation build(long owninigBucketOrd, List<B> buckets);

View File

@ -72,6 +72,11 @@ public abstract class DeferringBucketCollector extends BucketCollector {
return new WrappedAggregator(in);
}
/**
* A wrapped aggregator
*
* @opensearch.internal
*/
protected class WrappedAggregator extends Aggregator {
private Aggregator in;

View File

@ -72,6 +72,11 @@ public class AdjacencyMatrixAggregator extends BucketsAggregator {
public static final ParseField FILTERS_FIELD = new ParseField("filters");
/**
* A keyed filter
*
* @opensearch.internal
*/
protected static class KeyedFilter implements Writeable, ToXContentFragment {
private final String key;
private final QueryBuilder filter;

View File

@ -56,6 +56,12 @@ import java.util.Objects;
public class InternalAdjacencyMatrix extends InternalMultiBucketAggregation<InternalAdjacencyMatrix, InternalAdjacencyMatrix.InternalBucket>
implements
AdjacencyMatrix {
/**
* An internal bucket of the adjacency matrix
*
* @opensearch.internal
*/
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements AdjacencyMatrix.Bucket {
private final String key;

View File

@ -90,6 +90,11 @@ public class ParsedAdjacencyMatrix extends ParsedMultiBucketAggregation<ParsedAd
return aggregation;
}
/**
* A parsed bucket
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements AdjacencyMatrix.Bucket {
private String key;

View File

@ -45,6 +45,11 @@ import java.util.Map;
* @opensearch.internal
*/
public interface CompositeAggregation extends MultiBucketsAggregation {
/**
* Bucket in a composite agg
*
* @opensearch.internal
*/
interface Bucket extends MultiBucketsAggregation.Bucket {
Map<String, Object> getKey();
}

View File

@ -577,6 +577,11 @@ final class CompositeAggregator extends BucketsAggregator {
};
}
/**
* An entry in the composite aggregator
*
* @opensearch.internal
*/
private static class Entry {
final LeafReaderContext context;
final DocIdSet docIdSet;

View File

@ -50,6 +50,11 @@ import java.util.function.LongConsumer;
*/
public class CompositeValuesSourceConfig {
/**
* A single dimension provider
*
* @opensearch.internal
*/
@FunctionalInterface
public interface SingleDimensionValuesSourceProvider {
SingleDimensionValuesSource<?> createValuesSource(

View File

@ -75,6 +75,11 @@ import java.util.function.LongConsumer;
public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<DateHistogramValuesSourceBuilder>
implements
DateIntervalConsumer {
/**
* Supplier for a composite date histogram
*
* @opensearch.internal
*/
@FunctionalInterface
public interface DateHistogramCompositeSupplier {
CompositeValuesSourceConfig apply(

View File

@ -68,6 +68,11 @@ import java.util.function.LongUnaryOperator;
* @opensearch.internal
*/
public class GeoTileGridValuesSourceBuilder extends CompositeValuesSourceBuilder<GeoTileGridValuesSourceBuilder> {
/**
* Supplier for a composite geotile
*
* @opensearch.internal
*/
@FunctionalInterface
public interface GeoTileCompositeSuppier {
CompositeValuesSourceConfig apply(

View File

@ -61,6 +61,11 @@ import java.util.function.LongConsumer;
* @opensearch.internal
*/
public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<HistogramValuesSourceBuilder> {
/**
* Composite histogram supplier
*
* @opensearch.internal
*/
@FunctionalInterface
public interface HistogramCompositeSupplier {
CompositeValuesSourceConfig apply(

View File

@ -317,6 +317,11 @@ public class InternalComposite extends InternalMultiBucketAggregation<InternalCo
return Objects.hash(super.hashCode(), size, buckets, afterKey, Arrays.hashCode(reverseMuls), Arrays.hashCode(missingOrders));
}
/**
* The bucket iterator
*
* @opensearch.internal
*/
private static class BucketIterator implements Comparable<BucketIterator> {
final Iterator<InternalBucket> it;
InternalBucket current;
@ -335,6 +340,11 @@ public class InternalComposite extends InternalMultiBucketAggregation<InternalCo
}
}
/**
* Internal bucket for the internal composite agg
*
* @opensearch.internal
*/
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket
implements
CompositeAggregation.Bucket,
@ -516,6 +526,11 @@ public class InternalComposite extends InternalMultiBucketAggregation<InternalCo
return obj;
}
/**
* An array map used for the internal composite agg
*
* @opensearch.internal
*/
static class ArrayMap extends AbstractMap<String, Object> implements Comparable<ArrayMap> {
final List<String> keys;
final Comparable[] values;

View File

@ -106,6 +106,11 @@ public class ParsedComposite extends ParsedMultiBucketAggregation<ParsedComposit
return CompositeAggregation.toXContentFragment(this, builder, params);
}
/**
* Parsed bucket for the parsed composite agg
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements CompositeAggregation.Bucket {
private Map<String, Object> key;

View File

@ -62,7 +62,11 @@ import java.util.function.LongUnaryOperator;
* @opensearch.internal
*/
public class TermsValuesSourceBuilder extends CompositeValuesSourceBuilder<TermsValuesSourceBuilder> {
/**
* Composite supplier for terms
*
* @opensearch.internal
*/
@FunctionalInterface
public interface TermsCompositeSupplier {
CompositeValuesSourceConfig apply(

View File

@ -71,6 +71,11 @@ public class FiltersAggregator extends BucketsAggregator {
public static final ParseField OTHER_BUCKET_FIELD = new ParseField("other_bucket");
public static final ParseField OTHER_BUCKET_KEY_FIELD = new ParseField("other_bucket_key");
/**
* Keyed filter for the filters agg
*
* @opensearch.internal
*/
public static class KeyedFilter implements Writeable, ToXContentFragment {
private final String key;
private final QueryBuilder filter;

View File

@ -53,6 +53,11 @@ import java.util.Objects;
* @opensearch.internal
*/
public class InternalFilters extends InternalMultiBucketAggregation<InternalFilters, InternalFilters.InternalBucket> implements Filters {
/**
* Internal bucket for an internal filters agg
*
* @opensearch.internal
*/
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements Filters.Bucket {
private final boolean keyed;

View File

@ -106,6 +106,11 @@ public class ParsedFilters extends ParsedMultiBucketAggregation<ParsedFilters.Pa
return aggregation;
}
/**
* Parsed bucket for the parsed filters agg
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Filters.Bucket {
private String key;

View File

@ -89,6 +89,8 @@ public class CellIdSource extends ValuesSource.Numeric {
/**
* The encoder to use to convert a geopoint's (lon, lat, precision) into
* a long-encoded bucket key for aggregating.
*
* @opensearch.internal
*/
@FunctionalInterface
public interface GeoPointLongEncoder {

View File

@ -73,6 +73,11 @@ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationB
protected int shardSize;
private GeoBoundingBox geoBoundingBox = new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN));
/**
* A precision parser
*
* @opensearch.internal
*/
@FunctionalInterface
protected interface PrecisionParser {
int parse(XContentParser parser) throws IOException;

View File

@ -286,6 +286,11 @@ public class AutoDateHistogramAggregationBuilder extends ValuesSourceAggregation
return Objects.equals(numBuckets, other.numBuckets) && Objects.equals(minimumIntervalExpression, other.minimumIntervalExpression);
}
/**
* Rounding information
*
* @opensearch.internal
*/
public static class RoundingInfo implements Writeable {
final Rounding rounding;
final int[] innerIntervals;

View File

@ -243,6 +243,8 @@ abstract class AutoDateHistogramAggregator extends DeferableBucketAggregator {
* rebucket roughly {@code O(log number_of_hits_collected_so_far)} because
* the "shape" of the roundings is <strong>roughly</strong>
* logarithmically increasing.
*
* @opensearch.internal
*/
private static class FromSingle extends AutoDateHistogramAggregator {
private int roundingIdx;
@ -406,6 +408,8 @@ abstract class AutoDateHistogramAggregator extends DeferableBucketAggregator {
* rounding all of the keys for {@code owningBucketOrd} that we're going to
* collect and picking the rounding based on a real, accurate count and the
* min and max.
*
* @opensearch.internal
*/
private static class FromMany extends AutoDateHistogramAggregator {
/**

View File

@ -75,6 +75,11 @@ public class DateIntervalWrapper implements ToXContentFragment, Writeable {
private static final ParseField FIXED_INTERVAL_FIELD = new ParseField("fixed_interval");
private static final ParseField CALENDAR_INTERVAL_FIELD = new ParseField("calendar_interval");
/**
* The type of interval
*
* @opensearch.internal
*/
public enum IntervalTypeEnum implements Writeable {
NONE,
FIXED,

View File

@ -66,6 +66,11 @@ public final class InternalAutoDateHistogram extends InternalMultiBucketAggregat
InternalAutoDateHistogram,
InternalAutoDateHistogram.Bucket> implements Histogram, HistogramFactory {
/**
* Bucket for the internal auto date histogram agg
*
* @opensearch.internal
*/
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable<Bucket> {
final long key;
@ -157,6 +162,11 @@ public final class InternalAutoDateHistogram extends InternalMultiBucketAggregat
}
}
/**
* Information about the bucket
*
* @opensearch.internal
*/
static class BucketInfo {
final RoundingInfo[] roundingInfos;
@ -422,6 +432,11 @@ public final class InternalAutoDateHistogram extends InternalMultiBucketAggregat
return new InternalAutoDateHistogram.Bucket(buckets.get(0).key, docCount, format, aggs);
}
/**
* The result from a bucket reduce
*
* @opensearch.internal
*/
private static class BucketReduceResult {
final List<Bucket> buckets;
final int roundingIdx;

View File

@ -68,6 +68,11 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
Histogram,
HistogramFactory {
/**
* Bucket for an internal date histogram agg
*
* @opensearch.internal
*/
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable<Bucket> {
final long key;
@ -170,6 +175,11 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
}
}
/**
* Information about an empty bucket
*
* @opensearch.internal
*/
static class EmptyBucketInfo {
final Rounding rounding;

View File

@ -64,6 +64,11 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
implements
Histogram,
HistogramFactory {
/**
* Bucket for an internal histogram agg
*
* @opensearch.internal
*/
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable<Bucket> {
final double key;
@ -166,6 +171,11 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
}
}
/**
* Information about an empty bucket
*
* @opensearch.internal
*/
public static class EmptyBucketInfo {
final double interval, offset, minBound, maxBound;

View File

@ -62,8 +62,18 @@ public class InternalVariableWidthHistogram extends InternalMultiBucketAggregati
InternalVariableWidthHistogram,
InternalVariableWidthHistogram.Bucket> implements Histogram, HistogramFactory {
/**
* Bucket for an internal variable width histogram
*
* @opensearch.internal
*/
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable<Bucket> {
/**
* Bounds of the bucket
*
* @opensearch.internal
*/
public static class BucketBounds {
public double min;
public double max;
@ -219,6 +229,11 @@ public class InternalVariableWidthHistogram extends InternalMultiBucketAggregati
}
}
/**
* Information about an empty bucket
*
* @opensearch.internal
*/
static class EmptyBucketInfo {
final InternalAggregations subAggregations;

View File

@ -97,6 +97,11 @@ public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation<Parsed
return builder;
}
/**
* A parsed bucket for a parsed auto date histogram agg
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
private Long key;

View File

@ -78,6 +78,11 @@ public class ParsedDateHistogram extends ParsedMultiBucketAggregation<ParsedDate
return aggregation;
}
/**
* Parsed bucket for a parsed date histogram
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
private Long key;

View File

@ -75,6 +75,11 @@ public class ParsedHistogram extends ParsedMultiBucketAggregation<ParsedHistogra
return aggregation;
}
/**
* Parsed bucket for a parsed histogram
*
* @opensearch.internal
*/
static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
private Double key;

View File

@ -84,6 +84,11 @@ public class ParsedVariableWidthHistogram extends ParsedMultiBucketAggregation<P
return aggregation;
}
/**
* Parsed bucket for a parsed variable width histogram
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
private Double key;

View File

@ -38,6 +38,11 @@ import org.opensearch.search.aggregations.bucket.InternalSingleBucketAggregation
import java.io.IOException;
import java.util.Map;
/**
* Missing single bucket agg
*
* @opensearch.internal
*/
public class InternalMissing extends InternalSingleBucketAggregation implements Missing {
InternalMissing(String name, long docCount, InternalAggregations aggregations, Map<String, Object> metadata) {
super(name, docCount, aggregations, metadata);

View File

@ -230,6 +230,11 @@ public class NestedAggregator extends BucketsAggregator implements SingleBucketA
}
}
/**
* A cached scorable doc
*
* @opensearch.internal
*/
private static class CachedScorable extends Scorable {
int doc;
float score;

View File

@ -82,6 +82,11 @@ public class NestedAggregatorFactory extends AggregatorFactory {
return new NestedAggregator(name, factories, parentObjectMapper, childObjectMapper, searchContext, parent, cardinality, metadata);
}
/**
* Unmapped class for nested agg
*
* @opensearch.internal
*/
private static final class Unmapped extends NonCollectingAggregator {
Unmapped(String name, SearchContext context, Aggregator parent, AggregatorFactories factories, Map<String, Object> metadata)

View File

@ -83,6 +83,11 @@ public class ReverseNestedAggregatorFactory extends AggregatorFactory {
}
}
/**
* Unmapped class for reverse nested agg
*
* @opensearch.internal
*/
private static final class Unmapped extends NonCollectingAggregator {
Unmapped(String name, SearchContext context, Aggregator parent, AggregatorFactories factories, Map<String, Object> metadata)

View File

@ -62,6 +62,11 @@ import static java.util.Collections.emptyList;
*/
public final class BinaryRangeAggregator extends BucketsAggregator {
/**
* Range for the binary range agg
*
* @opensearch.internal
*/
public static class Range {
final String key;
@ -144,6 +149,11 @@ public final class BinaryRangeAggregator extends BucketsAggregator {
}
}
/**
* Leaf collector for the sorted set range
*
* @opensearch.internal
*/
abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBase {
final long[] froms, tos, maxTos;
@ -250,6 +260,11 @@ public final class BinaryRangeAggregator extends BucketsAggregator {
protected abstract void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException;
}
/**
* Base class for a sorted binary range leaf collector
*
* @opensearch.internal
*/
abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollectorBase {
final Range[] ranges;

View File

@ -121,6 +121,11 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde
return builder;
}
/**
* Range for a geo distance agg
*
* @opensearch.internal
*/
public static class Range extends RangeAggregator.Range {
public Range(String key, Double from, Double to) {
super(key(key, from, to), from == null ? 0 : from, to);

View File

@ -172,6 +172,11 @@ public class GeoDistanceRangeAggregatorFactory extends ValuesSourceAggregatorFac
);
}
/**
* The source location for the distance calculation
*
* @opensearch.internal
*/
private static class DistanceSource extends ValuesSource.Numeric {
private final ValuesSource.GeoPoint source;

View File

@ -61,6 +61,11 @@ public final class InternalBinaryRange extends InternalMultiBucketAggregation<In
implements
Range {
/**
* Bucket for a binary range agg
*
* @opensearch.internal
*/
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Range.Bucket {
private final transient DocValueFormat format;

View File

@ -51,6 +51,11 @@ import java.util.Map;
public class InternalDateRange extends InternalRange<InternalDateRange.Bucket, InternalDateRange> {
public static final Factory FACTORY = new Factory();
/**
* Bucket for a date range
*
* @opensearch.internal
*/
public static class Bucket extends InternalRange.Bucket {
public Bucket(
@ -113,6 +118,11 @@ public class InternalDateRange extends InternalRange<InternalDateRange.Bucket, I
}
}
/**
* Factory to create a date range
*
* @opensearch.internal
*/
public static class Factory extends InternalRange.Factory<InternalDateRange.Bucket, InternalDateRange> {
@Override
public ValueType getValueType() {

View File

@ -50,6 +50,11 @@ import java.util.Map;
public class InternalGeoDistance extends InternalRange<InternalGeoDistance.Bucket, InternalGeoDistance> {
public static final Factory FACTORY = new Factory();
/**
* Bucket for a geo distance range
*
* @opensearch.internal
*/
static class Bucket extends InternalRange.Bucket {
Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed) {
@ -66,6 +71,11 @@ public class InternalGeoDistance extends InternalRange<InternalGeoDistance.Bucke
}
}
/**
* Factory for a geo distance bucket
*
* @opensearch.internal
*/
public static class Factory extends InternalRange.Factory<InternalGeoDistance.Bucket, InternalGeoDistance> {
@Override
public ValuesSourceType getValueSourceType() {

View File

@ -59,6 +59,11 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
Range {
static final Factory FACTORY = new Factory();
/**
* Bucket for a range
*
* @opensearch.internal
*/
public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Range.Bucket {
protected final transient boolean keyed;
@ -211,6 +216,11 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
}
}
/**
* Factory for a range
*
* @opensearch.internal
*/
public static class Factory<B extends Bucket, R extends InternalRange<B, R>> {
public ValuesSourceType getValueSourceType() {
return CoreValuesSourceType.NUMERIC;

View File

@ -128,6 +128,11 @@ public final class IpRangeAggregationBuilder extends ValuesSourceAggregationBuil
}
}
/**
* Range for an IP range
*
* @opensearch.internal
*/
public static class Range implements ToXContentObject {
private final String key;

View File

@ -82,6 +82,11 @@ public class ParsedBinaryRange extends ParsedMultiBucketAggregation<ParsedBinary
return aggregation;
}
/**
* Parsed bucket for a binary range
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Range.Bucket {
private String key;

View File

@ -71,6 +71,11 @@ public class ParsedDateRange extends ParsedRange {
return aggregation;
}
/**
* Parsed bucket for a date range
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedRange.ParsedBucket {
@Override

View File

@ -68,6 +68,11 @@ public class ParsedGeoDistance extends ParsedRange {
return aggregation;
}
/**
* Parsed bucket for a geo distance
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedRange.ParsedBucket {
static ParsedBucket fromXContent(final XContentParser parser, final boolean keyed) throws IOException {

View File

@ -92,6 +92,11 @@ public class ParsedRange extends ParsedMultiBucketAggregation<ParsedRange.Parsed
return aggregation;
}
/**
* Parsed bucket for a range
*
* @opensearch.internal
*/
public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Range.Bucket {
protected String key;

View File

@ -75,6 +75,11 @@ public class RangeAggregator extends BucketsAggregator {
public static final ParseField RANGES_FIELD = new ParseField("ranges");
public static final ParseField KEYED_FIELD = new ParseField("keyed");
/**
* Range for the range aggregator
*
* @opensearch.internal
*/
public static class Range implements Writeable, ToXContentObject {
public static final ParseField KEY_FIELD = new ParseField("key");
public static final ParseField FROM_FIELD = new ParseField("from");
@ -383,6 +388,11 @@ public class RangeAggregator extends BucketsAggregator {
return rangeFactory.create(name, buckets, format, keyed, metadata());
}
/**
* Unmapped range
*
* @opensearch.internal
*/
public static class Unmapped<R extends RangeAggregator.Range> extends NonCollectingAggregator {
private final R[] ranges;

View File

@ -73,6 +73,11 @@ public class SamplerAggregator extends DeferableBucketAggregator implements Sing
static final long SCOREDOCKEY_SIZE = RamUsageEstimator.shallowSizeOfInstance(DiversifiedTopDocsCollector.ScoreDocKey.class);
/**
* The execution mode for the sampler
*
* @opensearch.internal
*/
public enum ExecutionMode {
MAP(new ParseField("map")) {

View File

@ -82,6 +82,8 @@ public abstract class BytesKeyedBucketOrds implements Releasable {
/**
* An iterator for buckets inside a particular {@code owningBucketOrd}.
*
* @opensearch.internal
*/
public interface BucketOrdsEnum {
/**
@ -122,6 +124,8 @@ public abstract class BytesKeyedBucketOrds implements Releasable {
/**
* Implementation that only works if it is collecting from a single bucket.
*
* @opensearch.internal
*/
private static class FromSingle extends BytesKeyedBucketOrds {
private final BytesRefHash ords;
@ -177,6 +181,8 @@ public abstract class BytesKeyedBucketOrds implements Releasable {
/**
* Implementation that works properly when collecting from many buckets.
*
* @opensearch.internal
*/
private static class FromMany extends BytesKeyedBucketOrds {
// TODO we can almost certainly do better here by building something fit for purpose rather than trying to lego together stuff

View File

@ -53,6 +53,11 @@ import java.util.Objects;
public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bucket> {
public static final String NAME = "dterms";
/**
* Bucket for a double terms agg
*
* @opensearch.internal
*/
static class Bucket extends InternalTerms.Bucket<Bucket> {
double term;

View File

@ -90,6 +90,11 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
protected int segmentsWithSingleValuedOrds = 0;
protected int segmentsWithMultiValuedOrds = 0;
/**
* Lookup global ordinals
*
* @opensearch.internal
*/
public interface GlobalOrdLookupFunction {
BytesRef apply(long ord) throws IOException;
}
@ -231,6 +236,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
/**
* This is used internally only, just for comparing by global ordinal instead of term bytes in the PQ
*
* @opensearch.internal
*/
static class OrdBucket extends InternalTerms.Bucket<OrdBucket> {
long globalOrd;
@ -284,6 +291,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
* This is only supported for the standard {@code terms} aggregation and
* doesn't support {@code significant_terms} so this forces
* {@link StandardTermsResults}.
*
* @opensearch.internal
*/
static class LowCardinality extends GlobalOrdinalsStringTermsAggregator {

View File

@ -162,16 +162,29 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
/**
* Base filter class
*
* @opensearch.internal
*/
public abstract static class Filter {}
// The includeValue and excludeValue ByteRefs which are the result of the parsing
// process are converted into a LongFilter when used on numeric fields
// in the index.
/**
* The includeValue and excludeValue ByteRefs, which are the result of the parsing
* process, are converted into a LongFilter when used on numeric fields
* in the index.
*
* @opensearch.internal
*/
public abstract static class LongFilter extends Filter {
public abstract boolean accept(long value);
}
/**
* Long filter that is partitioned
*
* @opensearch.internal
*/
public class PartitionedLongFilter extends LongFilter {
@Override
public boolean accept(long value) {
@ -181,6 +194,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
/**
* Long filter backed by valid values
*
* @opensearch.internal
*/
public static class SetBackedLongFilter extends LongFilter {
private LongSet valids;
private LongSet invalids;
@ -208,7 +226,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
// Only used for the 'map' execution mode (ie. scripts)
/**
* Only used for the 'map' execution mode (i.e. scripts)
*
* @opensearch.internal
*/
public abstract static class StringFilter extends Filter {
public abstract boolean accept(BytesRef value);
}
@ -220,6 +242,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
/**
* String filter backed by an automaton
*
* @opensearch.internal
*/
static class AutomatonBackedStringFilter extends StringFilter {
private final ByteRunAutomaton runAutomaton;
@ -237,6 +264,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
/**
* String filter backed by a term list
*
* @opensearch.internal
*/
static class TermListBackedStringFilter extends StringFilter {
private final Set<BytesRef> valids;
@ -257,6 +289,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
/**
* An ordinals filter
*
* @opensearch.internal
*/
public abstract static class OrdinalsFilter extends Filter {
public abstract LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException;
@ -284,6 +321,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
}
/**
* An ordinals filter backed by an automaton
*
* @opensearch.internal
*/
static class AutomatonBackedOrdinalsFilter extends OrdinalsFilter {
private final CompiledAutomaton compiled;
@ -311,6 +353,11 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
}
/**
* An ordinals filter backed by a terms list
*
* @opensearch.internal
*/
static class TermListBackedOrdinalsFilter extends OrdinalsFilter {
private final SortedSet<BytesRef> includeValues;
@ -508,6 +555,8 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
/**
* Terms adapter around doc values.
*
* @opensearch.internal
*/
private static class DocValuesTerms extends Terms {

View File

@ -36,6 +36,8 @@ import java.util.stream.Collectors;
public class InternalMultiTerms extends InternalTerms<InternalMultiTerms, InternalMultiTerms.Bucket> {
/**
* Internal Multi Terms Bucket.
*
* @opensearch.internal
*/
public static class Bucket extends InternalTerms.AbstractInternalBucket implements KeyComparable<Bucket> {
@ -193,6 +195,8 @@ public class InternalMultiTerms extends InternalTerms<InternalMultiTerms, Intern
/**
* Visible for testing.
*
* @opensearch.internal
*/
protected static class BucketComparator implements Comparator<List<Object>> {
@SuppressWarnings({ "unchecked" })

View File

@ -60,12 +60,19 @@ public abstract class InternalRareTerms<A extends InternalRareTerms<A, B>, B ext
implements
RareTerms {
/**
* Bucket for a rare terms agg
*
* @opensearch.internal
*/
public abstract static class Bucket<B extends Bucket<B>> extends InternalMultiBucketAggregation.InternalBucket
implements
RareTerms.Bucket,
KeyComparable<B> {
/**
* Reads a bucket. Should be a constructor reference.
*
* @opensearch.internal
*/
@FunctionalInterface
public interface Reader<B extends Bucket<B>> {

View File

@ -62,12 +62,19 @@ public abstract class InternalSignificantTerms<A extends InternalSignificantTerm
public static final String SCORE = "score";
public static final String BG_COUNT = "bg_count";
/**
* Bucket for a significant terms agg
*
* @opensearch.internal
*/
@SuppressWarnings("PMD.ConstructorCallsOverridableMethod")
public abstract static class Bucket<B extends Bucket<B>> extends InternalMultiBucketAggregation.InternalBucket
implements
SignificantTerms.Bucket {
/**
* Reads a bucket. Should be a constructor reference.
*
* @opensearch.internal
*/
@FunctionalInterface
public interface Reader<B extends Bucket<B>> {

View File

@ -75,6 +75,11 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
/**
* Base internal multi bucket
*
* @opensearch.internal
*/
public abstract static class AbstractInternalBucket extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket {
abstract void setDocCountError(long docCountError);
@ -83,9 +88,16 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
abstract boolean showDocCountError();
}
/**
* Base bucket class
*
* @opensearch.internal
*/
public abstract static class Bucket<B extends Bucket<B>> extends AbstractInternalBucket implements KeyComparable<B> {
/**
* Reads a bucket. Should be a constructor reference.
*
* @opensearch.internal
*/
@FunctionalInterface
public interface Reader<B extends Bucket<B>> {

View File

@ -100,6 +100,8 @@ public abstract class LongKeyedBucketOrds implements Releasable {
/**
* An iterator for buckets inside a particular {@code owningBucketOrd}.
*
* @opensearch.internal
*/
public interface BucketOrdsEnum {
/**
@ -142,6 +144,8 @@ public abstract class LongKeyedBucketOrds implements Releasable {
/**
* Implementation that only works if it is collecting from a single bucket.
*
* @opensearch.internal
*/
public static class FromSingle extends LongKeyedBucketOrds {
private final LongHash ords;
@ -221,6 +225,8 @@ public abstract class LongKeyedBucketOrds implements Releasable {
/**
* Implementation that works properly when collecting from many buckets.
*
* @opensearch.internal
*/
public static class FromMany extends LongKeyedBucketOrds {
private final LongLongHash ords;

View File

@ -52,6 +52,11 @@ import java.util.Objects;
public class LongRareTerms extends InternalMappedRareTerms<LongRareTerms, LongRareTerms.Bucket> {
public static final String NAME = "lrareterms";
/**
* Bucket for rare long valued terms
*
* @opensearch.internal
*/
public static class Bucket extends InternalRareTerms.Bucket<Bucket> {
long term;

View File

@ -53,6 +53,11 @@ import java.util.Objects;
public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket> {
public static final String NAME = "lterms";
/**
* Bucket for long terms
*
* @opensearch.internal
*/
public static class Bucket extends InternalTerms.Bucket<Bucket> {
long term;

Some files were not shown because too many files have changed in this diff.