[Javadocs] Add to o.o.discovery, env, gateway, http, ingest, lucene and node pkgs (#3185)

Adds javadocs to classes in the org.opensearch.discovery, env, gateway, http,
ingest, lucene, and node packages.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize 2022-05-04 14:32:01 -05:00 committed by GitHub
parent 6ca3278f96
commit 4d3da00c12
95 changed files with 319 additions and 2 deletions


@ -42,6 +42,8 @@ import java.util.Set;
* Allows waiting for all nodes to reply to the publish of a new cluster state
* and notifies the {@link org.opensearch.discovery.Discovery.AckListener}
* so that the cluster state update can be acknowledged
*
* @opensearch.internal
*/
public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler {


@ -43,6 +43,8 @@ import java.util.concurrent.TimeUnit;
/**
* Handles responses obtained when publishing a new cluster state from the cluster-manager to all non-cluster-manager nodes.
* Allows awaiting a reply from all non-cluster-manager nodes, up to a timeout
*
* @opensearch.internal
*/
public class BlockingClusterStatePublishResponseHandler {


@ -39,6 +39,8 @@ import org.opensearch.common.component.LifecycleComponent;
* A pluggable module for implementing discovery of other nodes, publishing the cluster
* state to all nodes, and electing a cluster-manager of the cluster that raises cluster state change
* events.
*
* @opensearch.internal
*/
public interface Discovery extends LifecycleComponent, ClusterStatePublisher {


@ -75,6 +75,8 @@ import static org.opensearch.node.Node.NODE_NAME_SETTING;
/**
* A module for loading classes for node discovery.
*
* @opensearch.internal
*/
public class DiscoveryModule {
private static final Logger logger = LogManager.getLogger(DiscoveryModule.class);


@ -42,6 +42,11 @@ import org.opensearch.cluster.coordination.PublishClusterStateStats;
import java.io.IOException;
/**
* Stores discovery stats
*
* @opensearch.internal
*/
public class DiscoveryStats implements Writeable, ToXContentFragment {
private final PendingClusterStateStats queueStats;


@ -57,6 +57,8 @@ import java.util.stream.Stream;
* 67.81.244.10
* 67.81.244.11:9305
* 67.81.244.15:9400
*
* @opensearch.internal
*/
public class FileBasedSeedHostsProvider implements SeedHostsProvider {


@ -57,6 +57,11 @@ import org.opensearch.transport.TransportService;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
/**
* Connector for transport handshake
*
* @opensearch.internal
*/
public class HandshakingTransportAddressConnector implements TransportAddressConnector {
private static final Logger logger = LogManager.getLogger(HandshakingTransportAddressConnector.class);


@ -38,6 +38,11 @@ import org.opensearch.rest.RestStatus;
import java.io.IOException;
/**
* Exception when the cluster-manager is not discovered
*
* @opensearch.internal
*/
public class MasterNotDiscoveredException extends OpenSearchException {
public MasterNotDiscoveredException() {


@ -65,6 +65,11 @@ import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
/**
* Finds peer nodes during cluster formation and discovery
*
* @opensearch.internal
*/
public abstract class PeerFinder {
private static final Logger logger = LogManager.getLogger(PeerFinder.class);


@ -41,6 +41,11 @@ import java.io.IOException;
import java.util.List;
import java.util.Objects;
/**
* Request sent to a peer node
*
* @opensearch.internal
*/
public class PeersRequest extends TransportRequest {
private final DiscoveryNode sourceNode;
private final List<DiscoveryNode> knownPeers;


@ -38,6 +38,8 @@ import java.util.List;
/**
* A pluggable provider of the list of seed hosts to use for discovery.
*
* @opensearch.internal
*/
public interface SeedHostsProvider {
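As a rough sketch of the pluggable contract described above, the example below resolves a fixed list of host strings into transport addresses. The getSeedAddresses(HostsResolver) method and the nested HostsResolver callback are assumed from the surrounding interface; a real provider would be registered through a discovery plugin.

import java.util.List;

import org.opensearch.common.transport.TransportAddress;

// Hedged sketch of a custom seed hosts provider; names and signatures are assumptions.
public class StaticSeedHostsProvider implements SeedHostsProvider {

    private final List<String> hosts;

    public StaticSeedHostsProvider(List<String> hosts) {
        this.hosts = hosts;
    }

    @Override
    public List<TransportAddress> getSeedAddresses(HostsResolver hostsResolver) {
        // Delegate hostname resolution (and its timeout handling) to the resolver supplied by discovery.
        return hostsResolver.resolveHosts(hosts);
    }
}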


@ -65,6 +65,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.stream.Collectors;
/**
* Resolves seed hosts listed in the config
*
* @opensearch.internal
*/
public class SeedHostsResolver extends AbstractLifecycleComponent implements ConfiguredHostsResolver {
public static final Setting<Integer> LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting(
"discovery.zen.ping.unicast.concurrent_connects",


@ -52,6 +52,8 @@ import static java.util.Collections.emptyList;
*
* An example setting might look as follows:
* [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400]
*
* @opensearch.internal
*/
public class SettingsBasedSeedHostsProvider implements SeedHostsProvider {
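To make the example setting concrete, here is a minimal sketch that supplies the same list programmatically through node settings; the key discovery.seed_hosts is the standard seed host setting and is an assumption here, not shown in the hunk above.

import java.util.Arrays;

import org.opensearch.common.settings.Settings;

// Illustrative only: node settings equivalent to the example list in the javadoc above.
public final class SeedHostsSettingsExample {
    public static Settings seedHostSettings() {
        return Settings.builder()
            .putList("discovery.seed_hosts",
                Arrays.asList("67.81.244.10", "67.81.244.11:9305", "67.81.244.15:9400"))
            .build();
    }
}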


@ -55,6 +55,8 @@ import java.util.stream.Collectors;
/**
* The environment in which the node runs; holds the resolved filesystem paths (config, data, logs, plugins) derived from the settings.
*
* @opensearch.internal
*/
@SuppressForbidden(reason = "configures paths for the system")
// TODO: move PathUtils to be package-private here instead of


@ -107,6 +107,8 @@ import static java.util.Collections.unmodifiableSet;
/**
* A component that holds all data paths for a single node.
*
* @opensearch.internal
*/
public final class NodeEnvironment implements Closeable {
public static class NodePath {


@ -48,6 +48,8 @@ import java.util.Objects;
/**
* Metadata associated with this node: its persistent node ID and its version.
* The metadata is persisted in the data folder of this node and is reused across restarts.
*
* @opensearch.internal
*/
public final class NodeMetadata {


@ -59,6 +59,11 @@ import java.util.stream.StreamSupport;
import static org.opensearch.env.NodeEnvironment.INDICES_FOLDER;
/**
* Command to repurpose a node
*
* @opensearch.internal
*/
public class NodeRepurposeCommand extends OpenSearchNodeCommand {
static final String ABORTED_BY_USER_MSG = OpenSearchNodeCommand.ABORTED_BY_USER_MSG;


@ -49,6 +49,8 @@ import java.util.List;
* additional features, such as SSD detection and better
* filesystem information for the root filesystem.
* @see Environment#getFileStore(Path)
*
* @opensearch.internal
*/
class OpenSearchFileStore extends FileStore {
/** Underlying filestore */


@ -43,6 +43,11 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
/**
* Command to override a node version
*
* @opensearch.internal
*/
public class OverrideNodeVersionCommand extends OpenSearchNodeCommand {
private static final String TOO_NEW_MESSAGE = DELIMITER
+ "\n"


@ -43,6 +43,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
* before executing any write operations on the shard's data directory.
*
* @see NodeEnvironment
*
* @opensearch.internal
*/
public abstract class ShardLock implements Closeable {
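A hedged usage sketch of the lock-before-write contract described above. The NodeEnvironment#shardLock(shardId, details, timeoutMillis) signature is assumed and may differ between versions; ShardLock itself is Closeable, so try-with-resources releases it.

import org.opensearch.env.NodeEnvironment;
import org.opensearch.env.ShardLock;
import org.opensearch.env.ShardLockObtainFailedException;
import org.opensearch.index.shard.ShardId;

// Sketch: hold the in-memory shard lock for the duration of any writes to the shard's data directory.
public final class ShardDirectoryMaintenance {
    public static void cleanUp(NodeEnvironment nodeEnvironment, ShardId shardId) throws Exception {
        try (ShardLock lock = nodeEnvironment.shardLock(shardId, "example cleanup", 5_000)) {
            // Safe to touch the shard's data directory while the lock is held.
        } catch (ShardLockObtainFailedException e) {
            // Another component (for example an open shard) holds the lock; back off and retry later.
        }
    }
}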


@ -40,6 +40,8 @@ import java.io.IOException;
/**
* Exception used when the in-memory lock for a shard cannot be obtained
*
* @opensearch.internal
*/
public class ShardLockObtainFailedException extends OpenSearchException {


@ -69,6 +69,8 @@ import static java.util.Collections.unmodifiableSet;
* The async fetch logic maintains a map of which nodes are being fetched from in an async manner,
* and once the results are back, it schedules a reroute so that those results will
* be taken into account.
*
* @opensearch.internal
*/
public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Releasable {


@ -53,6 +53,8 @@ import java.util.List;
*
* Individual implementations of this class are responsible for providing
* the logic to determine to which nodes (if any) those shards are allocated.
*
* @opensearch.internal
*/
public abstract class BaseGatewayShardAllocator {


@ -49,6 +49,11 @@ import java.util.Map;
import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
/**
* Updates cluster state
*
* @opensearch.internal
*/
public class ClusterStateUpdaters {
private static final Logger logger = LogManager.getLogger(ClusterStateUpdaters.class);


@ -36,6 +36,8 @@ import org.opensearch.OpenSearchCorruptionException;
/**
* This exception is thrown when OpenSearch detects
* an inconsistency in one of its persistent states.
*
* @opensearch.internal
*/
public class CorruptStateException extends OpenSearchCorruptionException {


@ -63,6 +63,8 @@ import static java.util.Collections.unmodifiableMap;
* The dangling indices state is responsible for finding new dangling indices (indices that have
* their state written on disk, but don't exist in the metadata of the cluster), and importing
* them into the cluster.
*
* @opensearch.internal
*/
public class DanglingIndicesState implements ClusterStateListener {


@ -47,6 +47,11 @@ import org.opensearch.index.Index;
import java.util.Arrays;
import java.util.function.Function;
/**
* The Gateway: recovers the persisted cluster state from cluster-manager-eligible nodes
*
* @opensearch.internal
*/
public class Gateway {
private static final Logger logger = LogManager.getLogger(Gateway.class);


@ -63,6 +63,11 @@ import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
/**
* Allocator for the gateway
*
* @opensearch.internal
*/
public class GatewayAllocator implements ExistingShardsAllocator {
public static final String ALLOCATOR_NAME = "gateway_allocator";


@ -37,6 +37,11 @@ import org.opensearch.common.io.stream.StreamInput;
import java.io.IOException;
/**
* Base Exception for the gateway
*
* @opensearch.internal
*/
public class GatewayException extends OpenSearchException {
public GatewayException(StreamInput in) throws IOException {


@ -91,6 +91,8 @@ import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonTh
* the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link
* ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and
* non-stale state, and cluster-manager-ineligible nodes receive the real cluster state from the elected cluster-manager after joining the cluster.
*
* @opensearch.internal
*/
public class GatewayMetaState implements Closeable {


@ -34,6 +34,11 @@ package org.opensearch.gateway;
import org.opensearch.common.inject.AbstractModule;
/**
* Binds the gateway module
*
* @opensearch.internal
*/
public class GatewayModule extends AbstractModule {
@Override


@ -60,6 +60,11 @@ import org.opensearch.threadpool.ThreadPool;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
/**
* The Gateway Service provider
*
* @opensearch.internal
*/
public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener {
private static final Logger logger = LogManager.getLogger(GatewayService.class);


@ -54,6 +54,8 @@ import java.util.function.LongSupplier;
/**
* Tracks the metadata written to disk, allowing updated metadata to be written incrementally (i.e. only writing out the changed metadata).
*
* @opensearch.internal
*/
public class IncrementalClusterStateWriter {


@ -68,6 +68,11 @@ import java.util.Collection;
import static org.opensearch.cluster.metadata.MetadataIndexStateService.isIndexVerifiedBeforeClosed;
/**
* Allocates dangled indices
*
* @opensearch.internal
*/
public class LocalAllocateDangledIndices {
private static final Logger logger = LogManager.getLogger(LocalAllocateDangledIndices.class);


@ -54,6 +54,8 @@ import java.util.function.Predicate;
/**
* Handles writing and loading {@link Manifest}, {@link Metadata} and {@link IndexMetadata}
*
* @opensearch.internal
*/
public class MetaStateService {
private static final Logger logger = LogManager.getLogger(MetaStateService.class);


@ -75,6 +75,8 @@ import java.util.stream.Collectors;
* MetadataStateFormat is a base class to write checksummed
* XContent based files to one or more directories in a standardized directory structure.
* @param <T> the type of the XContent base data-structure
*
* @opensearch.internal
*/
public abstract class MetadataStateFormat<T> {
public static final XContentType FORMAT = XContentType.SMILE;


@ -131,6 +131,8 @@ import java.util.function.Supplier;
* +---------------------------+-------------------------+-------------------------------------------------------------------------------+
*
* (the last-accepted term is recorded in Metadata CoordinationMetadata so does not need repeating here)
*
* @opensearch.internal
*/
public class PersistedClusterStateService {
private static final Logger logger = LogManager.getLogger(PersistedClusterStateService.class);


@ -74,6 +74,8 @@ import java.util.stream.Stream;
* (see {@link org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator}),
* nor does it allocate primaries when a primary shard failed and there is a valid replica
* copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}.
*
* @opensearch.internal
*/
public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
/**


@ -51,6 +51,8 @@ import java.util.Comparator;
* <li>Lastly the index names are compared, which is useful when a date is baked into the index
* name, e.g. <code>logstash-2015.05.03</code></li>
* </ol>
*
* @opensearch.internal
*/
public abstract class PriorityComparator implements Comparator<ShardRouting> {


@ -65,6 +65,11 @@ import java.util.Set;
import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
/**
* Allocates replica shards
*
* @opensearch.internal
*/
public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
/**
* Process existing recoveries of replicas and see if we need to cancel them if we find a better


@ -57,6 +57,11 @@ import org.opensearch.transport.TransportService;
import java.io.IOException;
import java.util.List;
/**
* Lists gateway meta state
*
* @opensearch.internal
*/
public class TransportNodesListGatewayMetaState extends TransportNodesAction<
TransportNodesListGatewayMetaState.Request,
TransportNodesListGatewayMetaState.NodesGatewayMetaState,


@ -72,6 +72,8 @@ import java.util.Objects;
* This transport action is used to fetch the shard version from each node during primary allocation in {@link GatewayAllocator}.
* We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate
* shards after node or cluster restarts.
*
* @opensearch.internal
*/
public class TransportNodesListGatewayStartedShards extends TransportNodesAction<
TransportNodesListGatewayStartedShards.Request,


@ -37,6 +37,8 @@ import java.io.UncheckedIOException;
/**
* This exception is thrown when there is a problem of writing state to disk.
*
* @opensearch.internal
*/
public class WriteStateException extends IOException {
private final boolean dirty;


@ -78,6 +78,11 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_PORT;
import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST;
import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;
/**
* Base class for HTTP server transport implementations
*
* @opensearch.internal
*/
public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {
private static final Logger logger = LogManager.getLogger(AbstractHttpServerTransport.class);
private static final ActionListener<Void> NO_OP = ActionListener.wrap(() -> {});


@ -36,6 +36,11 @@ import org.opensearch.common.io.stream.StreamInput;
import java.io.IOException;
/**
* Exception thrown when trying to bind to an http endpoint
*
* @opensearch.internal
*/
public class BindHttpException extends HttpException {
public BindHttpException(String message) {


@ -84,6 +84,8 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
*
* It modifies the original netty code to operate on OpenSearch http request/response abstractions.
* Additionally, it removes CORS features that are not used by OpenSearch.
*
* @opensearch.internal
*/
public class CorsHandler {


@ -58,6 +58,8 @@ import static org.opensearch.tasks.Task.X_OPAQUE_ID;
/**
* The default rest channel for incoming requests. This class implements the basic logic for sending a rest
* response. It will set necessary headers and ensure that bytes are released after the response is sent.
*
* @opensearch.internal
*/
public class DefaultRestChannel extends AbstractRestChannel implements RestChannel {


@ -37,6 +37,11 @@ import org.opensearch.common.network.CloseableChannel;
import java.net.InetSocketAddress;
/**
* Represents an HTTP comms channel
*
* @opensearch.internal
*/
public interface HttpChannel extends CloseableChannel {
/**


@ -37,6 +37,11 @@ import org.opensearch.common.io.stream.StreamInput;
import java.io.IOException;
/**
* Exception thrown on HTTP requests
*
* @opensearch.internal
*/
public class HttpException extends OpenSearchException {
public HttpException(String message) {


@ -46,6 +46,11 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOU
import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
import static org.opensearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
/**
* HTTP Settings
*
* @opensearch.internal
*/
public class HttpHandlingSettings {
private final int maxContentLength;


@ -44,6 +44,11 @@ import org.opensearch.node.ReportingService;
import java.io.IOException;
/**
* Information about an http connection
*
* @opensearch.internal
*/
public class HttpInfo implements ReportingService.Info {
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(HttpInfo.class);


@ -31,6 +31,11 @@
package org.opensearch.http;
/**
* Pipeline messages for http connections
*
* @opensearch.internal
*/
public interface HttpPipelinedMessage extends Comparable<HttpPipelinedMessage> {
/**


@ -39,6 +39,11 @@ import org.opensearch.rest.RestStatus;
import java.util.List;
import java.util.Map;
/**
* Pipeline requests for http connections
*
* @opensearch.internal
*/
public class HttpPipelinedRequest implements HttpRequest, HttpPipelinedMessage {
private final int sequence;


@ -31,6 +31,11 @@
package org.opensearch.http;
/**
* Pipeline responses for http connections
*
* @opensearch.internal
*/
public class HttpPipelinedResponse implements HttpPipelinedMessage, HttpResponse {
private final int sequence;


@ -38,6 +38,11 @@ import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;
/**
* Aggregates HTTP requests
*
* @opensearch.internal
*/
public class HttpPipeliningAggregator<Listener> {
private final int maxEventsHeld;


@ -31,6 +31,11 @@
package org.opensearch.http;
/**
* Timeout Exception for HTTP read operations
*
* @opensearch.internal
*/
public class HttpReadTimeoutException extends RuntimeException {
public HttpReadTimeoutException(long readTimeoutMillis) {


@ -44,6 +44,8 @@ import java.util.Map;
/**
* A basic http request abstraction. Http modules need to implement this interface to integrate with the
* server package's rest handling.
*
* @opensearch.internal
*/
public interface HttpRequest {


@ -35,6 +35,8 @@ package org.opensearch.http;
/**
* A basic http response abstraction. Http modules must implement this interface as the server package rest
* handling needs to set http headers for a response.
*
* @opensearch.internal
*/
public interface HttpResponse {


@ -36,6 +36,11 @@ import org.opensearch.common.network.CloseableChannel;
import java.net.InetSocketAddress;
/**
* Http server channel
*
* @opensearch.internal
*/
public interface HttpServerChannel extends CloseableChannel {
/**


@ -39,6 +39,11 @@ import org.opensearch.node.ReportingService;
import org.opensearch.rest.RestChannel;
import org.opensearch.rest.RestRequest;
/**
* HTTP Transport server
*
* @opensearch.internal
*/
public interface HttpServerTransport extends LifecycleComponent, ReportingService<HttpInfo> {
String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker";


@ -40,6 +40,11 @@ import org.opensearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Stats for HTTP connections
*
* @opensearch.internal
*/
public class HttpStats implements Writeable, ToXContentFragment {
private final long serverOpen;


@ -48,6 +48,8 @@ import java.util.List;
/**
* Http request trace logger. See {@link #maybeTraceRequest(RestRequest, Exception)} for details.
*
* @opensearch.internal
*/
class HttpTracer {


@ -50,6 +50,11 @@ import static org.opensearch.common.settings.Setting.boolSetting;
import static org.opensearch.common.settings.Setting.intSetting;
import static org.opensearch.common.settings.Setting.listSetting;
/**
* Transport settings for http connections
*
* @opensearch.internal
*/
public final class HttpTransportSettings {
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, Property.NodeScope);
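For orientation, a small sketch that sets a few of the keys registered in this class; http.cors.enabled is visible in the hunk above, while http.port and http.cors.allow-origin are assumed from the same settings family, and the values are examples rather than defaults.

import org.opensearch.common.settings.Settings;

// Illustrative HTTP transport configuration built via the settings API.
public final class HttpSettingsExample {
    public static Settings httpSettings() {
        return Settings.builder()
            .put("http.port", "9200-9300")
            .put("http.cors.enabled", true)
            .put("http.cors.allow-origin", "https://example.org")
            .build();
    }
}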


@ -32,6 +32,11 @@
package org.opensearch.http;
/**
* Utility class for HTTP connections
*
* @opensearch.internal
*/
public class HttpUtils {
static final String CLOSE = "close";


@ -35,6 +35,8 @@ package org.opensearch.ingest;
/**
* An Abstract Processor that holds tag and description information
* about the processor.
*
* @opensearch.internal
*/
public abstract class AbstractProcessor implements Processor {
protected final String tag;


@ -48,6 +48,8 @@ import java.util.stream.Collectors;
/**
* A Processor that executes a list of other "processors". It executes a separate list of
* "onFailureProcessors" when any of the processors throw an {@link Exception}.
*
* @opensearch.internal
*/
public class CompoundProcessor implements Processor {
public static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
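A minimal composition sketch for the failure handling described above. It assumes a CompoundProcessor constructor of the shape (ignoreFailure, processors, onFailureProcessors); the two processors passed in stand in for real instances built from registered factories.

import java.util.List;

import org.opensearch.ingest.CompoundProcessor;
import org.opensearch.ingest.Processor;

// Sketch: run `main`, and if it throws, run `onFailure` instead of failing the document outright.
public final class CompoundProcessorExample {
    public static CompoundProcessor withFailureHandling(Processor main, Processor onFailure) {
        return new CompoundProcessor(false, List.of(main), List.of(onFailure));
    }
}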


@ -57,6 +57,11 @@ import java.util.stream.Collectors;
import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException;
/**
* Ingest processor that executes its inner processor only when a configured condition evaluates to true
*
* @opensearch.internal
*/
public class ConditionalProcessor extends AbstractProcessor implements WrappingProcessor {
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class);


@ -58,6 +58,11 @@ import java.util.Map;
import static org.opensearch.script.Script.DEFAULT_TEMPLATE_LANG;
/**
* Utility class for ingest processor configurations
*
* @opensearch.internal
*/
public final class ConfigurationUtils {
public static final String TAG_KEY = "tag";


@ -37,6 +37,8 @@ import java.util.Map;
/**
* Drop processor only returns {@code null} for the execution result to indicate that any document
* executed by it should not be indexed.
*
* @opensearch.internal
*/
public final class DropProcessor extends AbstractProcessor {


@ -59,6 +59,8 @@ import java.util.function.BiConsumer;
/**
* Represents a single document being captured before indexing and holds the source and metadata (like id, type and index).
*
* @opensearch.internal
*/
public final class IngestDocument {
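A small sketch of the field accessors this class exposes for reading and mutating the captured source; hasField, setFieldValue and appendFieldValue with dot-notation paths are the assumed entry points, shown here in a free-standing helper for illustration only.

import org.opensearch.ingest.IngestDocument;

// Sketch: read and modify the captured document's source using dot-notation field paths.
public final class IngestDocumentExample {
    public static void tag(IngestDocument document) {
        if (document.hasField("user.name") == false) {
            document.setFieldValue("user.name", "anonymous");
        }
        // appendFieldValue creates the field as a list if needed, then appends to it.
        document.appendFieldValue("tags", "processed");
    }
}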


@ -43,6 +43,11 @@ import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
/**
* Information about the ingest processors available on a node
*
* @opensearch.internal
*/
public class IngestInfo implements ReportingService.Info {
private final Set<ProcessorInfo> processors;


@ -54,6 +54,8 @@ import java.util.Map;
/**
* Holds the ingest pipelines that are available in the cluster
*
* @opensearch.internal
*/
public final class IngestMetadata implements Metadata.Custom {


@ -43,6 +43,8 @@ import java.util.concurrent.atomic.AtomicLong;
* The scope is determined by the calling code. For example you can use this class to count all documents across all pipelines,
* or you can use this class to count documents for a given pipeline or a specific processor.
* This class does not make assumptions about its given scope.
*
* @opensearch.internal
*/
class IngestMetric {


@ -41,6 +41,8 @@ import java.io.IOException;
/**
* A dedicated wrapper for exceptions encountered executing an ingest processor. The wrapper is needed as we currently only unwrap causes
* for instances of {@link OpenSearchWrapperException}.
*
* @opensearch.internal
*/
public class IngestProcessorException extends OpenSearchException implements OpenSearchWrapperException {


@ -92,6 +92,8 @@ import java.util.function.IntConsumer;
/**
* Holder class for several ingest related services.
*
* @opensearch.internal
*/
public class IngestService implements ClusterStateApplier, ReportingService<IngestInfo> {


@ -49,6 +49,11 @@ import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
* Stats for an ingest processor pipeline
*
* @opensearch.internal
*/
public class IngestStats implements Writeable, ToXContentFragment {
private final Stats totalStats;
private final List<PipelineStat> pipelineStats;


@ -47,6 +47,8 @@ import org.opensearch.script.ScriptService;
/**
* A pipeline is a list of {@link Processor} instances grouped under a unique id.
*
* @opensearch.internal
*/
public final class Pipeline {


@ -52,6 +52,8 @@ import java.util.Objects;
/**
* Encapsulates a pipeline's id and configuration as a blob
*
* @opensearch.internal
*/
public final class PipelineConfiguration extends AbstractDiffable<PipelineConfiguration> implements ToXContentObject {


@ -37,6 +37,11 @@ import org.opensearch.script.TemplateScript;
import java.util.Map;
import java.util.function.BiConsumer;
/**
* A processor that executes another named ingest pipeline
*
* @opensearch.internal
*/
public class PipelineProcessor extends AbstractProcessor {
public static final String TYPE = "pipeline";


@ -50,6 +50,8 @@ import java.util.function.LongSupplier;
* Whether changes are made and what exactly is modified is up to the implementation.
*
* Processors may get called concurrently and thus need to be thread-safe.
*
* @opensearch.internal
*/
public interface Processor {
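To ground the contract above (the processor may modify the document, and must tolerate concurrent calls), here is a hedged sketch built on AbstractProcessor from this package. The type name, the (tag, description) constructor shape and the field handling are assumptions for illustration.

import java.util.Locale;

import org.opensearch.ingest.AbstractProcessor;
import org.opensearch.ingest.IngestDocument;

// Hedged sketch of a custom processor: it keeps no mutable state, so concurrent execution is safe.
public final class LowercaseFieldProcessor extends AbstractProcessor {

    public static final String TYPE = "lowercase_field"; // hypothetical type name
    private final String field;

    public LowercaseFieldProcessor(String tag, String description, String field) {
        super(tag, description); // constructor shape assumed from AbstractProcessor
        this.field = field;
    }

    @Override
    public IngestDocument execute(IngestDocument document) {
        if (document.hasField(field)) {
            String value = document.getFieldValue(field, String.class);
            document.setFieldValue(field, value.toLowerCase(Locale.ROOT));
        }
        return document;
    }

    @Override
    public String getType() {
        return TYPE;
    }
}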


@ -41,6 +41,11 @@ import org.opensearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Information about an ingest processor
*
* @opensearch.internal
*/
public class ProcessorInfo implements Writeable, ToXContentObject, Comparable<ProcessorInfo> {
private final String type;


@ -44,6 +44,8 @@ import static org.opensearch.ingest.IngestDocument.PIPELINE_CYCLE_ERROR_MESSAGE;
/**
* Processor to be used within the Simulate API to keep track of processors executed in a pipeline.
*
* @opensearch.internal
*/
public final class TrackingResultProcessor implements Processor {


@ -49,6 +49,8 @@ import static org.opensearch.script.Script.DEFAULT_TEMPLATE_LANG;
/**
* Holds a value. If the value is requested a copy is made and optionally template snippets are resolved too.
*
* @opensearch.internal
*/
public interface ValueSource {


@ -35,6 +35,8 @@ package org.opensearch.ingest;
/**
* A wrapping processor is one that encapsulates an inner processor, or a processor that the wrapped processor enacts upon. All processors
* that contain an "inner" processor should implement this interface, such that the actual processor can be obtained.
*
* @opensearch.internal
*/
public interface WrappingProcessor extends Processor {


@ -48,7 +48,10 @@ import java.io.IOException;
import java.util.Objects;
/** A {@link Query} that only matches documents that are greater than or equal
* to a configured doc ID.
*
* @opensearch.internal
*/
public final class MinDocQuery extends Query {
// Matching documents depend on the sequence of segments that the index reader


@ -56,6 +56,7 @@ import java.util.Objects;
/**
* A {@link Query} that only matches documents that are greater than the provided {@link FieldDoc}.
* This works only if the index is sorted according to the given search {@link Sort}.
* @opensearch.internal
*/
public class SearchAfterSortedDocQuery extends Query {
private final Sort sort;


@ -52,6 +52,8 @@ import java.util.stream.Collectors;
* Class representing statistics about adaptive replica selection. This includes
* EWMA of queue size, service time, and response time, as well as outgoing
* searches to each node and the "rank" based on the ARS formula.
*
* @opensearch.internal
*/
public class AdaptiveSelectionStats implements Writeable, ToXContentFragment {


@ -48,6 +48,11 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.settings.SettingsException;
import org.opensearch.env.Environment;
/**
* Prepares internal settings
*
* @opensearch.internal
*/
public class InternalSettingsPreparer {
private static final String SECRET_PROMPT_VALUE = "${prompt.secret}";


@ -224,6 +224,8 @@ import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_
/**
* A node represents a node within a cluster ({@code cluster.name}). The {@link #client()} can be used
* in order to use a {@link Client} to perform actions/operations against the cluster.
*
* @opensearch.internal
*/
public class Node implements Closeable {
public static final Setting<Boolean> WRITE_PORTS_FILE_SETTING = Setting.boolSetting("node.portsfile", false, Property.NodeScope);
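A hedged sketch of the client() usage mentioned in the javadoc, against a node that has already been constructed and started; node construction itself is version specific and omitted here.

import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.node.Node;

// Sketch: use the node's local Client to run an action against the cluster it has joined.
public final class NodeClientExample {
    public static ClusterHealthResponse clusterHealth(Node node) {
        return node.client().admin().cluster().prepareHealth().get();
    }
}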


@ -41,7 +41,7 @@ import java.io.IOException;
/**
* An exception indicating that the node is closed.
*
* @opensearch.internal
*/
public class NodeClosedException extends OpenSearchException {


@ -40,6 +40,11 @@ import org.opensearch.common.settings.Setting.Property;
import java.util.List;
import java.util.stream.Collectors;
/**
* Settings for a node role
*
* @opensearch.internal
*/
public class NodeRoleSettings {
public static final Setting<List<DiscoveryNodeRole>> NODE_ROLES_SETTING = Setting.listSetting(
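For context, a sketch of configuring the node.roles list that this setting parses; the role names shown (cluster_manager, data) are the standard built-in roles and are assumptions here rather than values taken from the hunk.

import org.opensearch.common.settings.Settings;

// Illustrative only: a node restricted to cluster-manager and data duties.
public final class NodeRolesExample {
    public static Settings roleSettings() {
        return Settings.builder()
            .putList("node.roles", "cluster_manager", "data")
            .build();
    }
}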


@ -60,6 +60,11 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
* Services exposed to nodes
*
* @opensearch.internal
*/
public class NodeService implements Closeable {
private final Settings settings;
private final ThreadPool threadPool;


@ -41,6 +41,8 @@ import java.util.List;
* begins accepting network requests in
* {@link Node#validateNodeBeforeAcceptingRequests(org.opensearch.bootstrap.BootstrapContext, BoundTransportAddress, List)}.
* This exception is a checked exception that is declared as thrown from this method for the purpose of bubbling up to the user.
*
* @opensearch.internal
*/
public class NodeValidationException extends Exception {


@ -35,6 +35,11 @@ package org.opensearch.node;
import org.opensearch.common.io.stream.Writeable;
import org.opensearch.common.xcontent.ToXContent;
/**
* Node reporting service
*
* @opensearch.internal
*/
public interface ReportingService<I extends ReportingService.Info> {
I info();


@ -53,6 +53,8 @@ import java.util.concurrent.ConcurrentMap;
* Collects statistics about queue size, response time, and service time of
* tasks executed on each node, making the EWMA of the values available to the
* coordinating node.
*
* @opensearch.internal
*/
public final class ResponseCollectorService implements ClusterStateListener {
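As a rough illustration of the smoothing mentioned above (not the actual adaptive replica selection rank formula), an exponentially weighted moving average can be sketched as follows; the production code uses its own EWMA utility rather than this standalone class.

// Generic EWMA sketch: each new sample moves the average by a fixed fraction (alpha),
// which is how per-node queue size, service time and response time are smoothed.
public final class Ewma {

    private final double alpha;
    private double average;

    public Ewma(double alpha, double initialValue) {
        this.alpha = alpha;
        this.average = initialValue;
    }

    public synchronized void addValue(double sample) {
        average = alpha * sample + (1 - alpha) * average;
    }

    public synchronized double getAverage() {
        return average;
    }
}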