[Rename] Second pass clean up of imports and variable names in server module (#382)

This commit cleans up imports, variable names, comments, and other miscellaneous
usages, replacing the old ES name with the new OpenSearch name.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize 2021-03-18 12:01:05 -05:00 committed by Nick Knize
parent b5e00e1e58
commit f75803e1aa
87 changed files with 222 additions and 222 deletions

View File

@ -256,8 +256,8 @@ public class RemoteScrollableHitSourceTests extends OpenSearchTestCase {
assertEquals("87A7NvevQxSrEwMbtRCecg", r.getFailures().get(0).getNodeId());
assertThat(r.getFailures().get(0).getReason(), instanceOf(OpenSearchRejectedExecutionException.class));
assertEquals("rejected execution of org.opensearch.transport.TransportService$5@52d06af2 on "
+ "EsThreadPoolExecutor[search, queue capacity = 1000, org.opensearch.common.util.concurrent."
+ "EsThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, "
+ "OpenSearchThreadPoolExecutor[search, queue capacity = 1000, org.opensearch.common.util.concurrent."
+ "OpenSearchThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, "
+ "completed tasks = 4182]]", r.getFailures().get(0).getReason().getMessage());
assertThat(r.getHits(), hasSize(1));
assertEquals("test", r.getHits().get(0).getIndex());

View File

@ -23,8 +23,8 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.test.OpenSearchTestCase;
import org.junit.After;
import org.junit.Before;
@ -70,7 +70,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionErrorOnFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1,
final OpenSearchThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1,
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try {
checkExecutionError(getExecuteRunner(fixedExecutor));
@ -81,7 +81,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionErrorOnScalingESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1,
final OpenSearchThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1,
10, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try {
checkExecutionError(getExecuteRunner(scalingExecutor));
@ -92,7 +92,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionErrorOnAutoQueueFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1,
final OpenSearchThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1,
1, 1, 1, TimeValue.timeValueSeconds(10), OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try {
checkExecutionError(getExecuteRunner(autoQueueFixedExecutor));
@ -103,7 +103,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionErrorOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException {
final PrioritizedEsThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test",
final PrioritizedOpenSearchThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test",
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler());
try {
checkExecutionError(getExecuteRunner(prioritizedExecutor));
@ -179,7 +179,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionExceptionOnFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1,
final OpenSearchThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1,
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try {
checkExecutionException(getExecuteRunner(fixedExecutor), true);
@ -190,7 +190,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionExceptionOnScalingESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1,
final OpenSearchThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1,
10, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try {
checkExecutionException(getExecuteRunner(scalingExecutor), true);
@ -201,7 +201,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionExceptionOnAutoQueueFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1,
final OpenSearchThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1,
1, 1, 1, TimeValue.timeValueSeconds(10), OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try {
// fixed_auto_queue_size wraps stuff into TimedRunnable, which is an AbstractRunnable
@ -213,7 +213,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
}
public void testExecutionExceptionOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException {
final PrioritizedEsThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test",
final PrioritizedOpenSearchThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test",
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler());
try {
checkExecutionException(getExecuteRunner(prioritizedExecutor), true);

View File

@ -236,7 +236,7 @@ public class Packages {
return sh.runIgnoreExitCode("service opensearch start");
}
public static void assertElasticsearchStarted(Shell sh, Installation installation) throws Exception {
public static void assertOpenSearchStarted(Shell sh, Installation installation) throws Exception {
waitForOpenSearch(installation);
if (isSystemd()) {
@ -261,7 +261,7 @@ public class Packages {
} else {
sh.run("service opensearch restart");
}
assertElasticsearchStarted(sh, installation);
assertOpenSearchStarted(sh, installation);
}
/**

View File

@ -32,7 +32,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.opensearch.common.lucene.search.function.FunctionScoreQuery;
import org.opensearch.index.search.ESToParentBlockJoinQuery;
import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
import java.io.IOException;
import java.util.Collection;
@ -85,8 +85,8 @@ public class CustomFieldQuery extends FieldQuery {
for (Term term : synQuery.getTerms()) {
flatten(new TermQuery(term), reader, flatQueries, boost);
}
} else if (sourceQuery instanceof ESToParentBlockJoinQuery) {
Query childQuery = ((ESToParentBlockJoinQuery) sourceQuery).getChildQuery();
} else if (sourceQuery instanceof OpenSearchToParentBlockJoinQuery) {
Query childQuery = ((OpenSearchToParentBlockJoinQuery) sourceQuery).getChildQuery();
if (childQuery != null) {
flatten(childQuery, reader, flatQueries, boost);
}

View File

@ -95,12 +95,12 @@ public class Build {
// these are parsed at startup, and we require that we are able to recognize the values passed in by the startup scripts
type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"), true);
final String esPrefix = "elasticsearch-" + Version.CURRENT;
final String opensearchPrefix = "opensearch-" + Version.CURRENT;
final URL url = getOpenSearchCodeSourceLocation();
final String urlStr = url == null ? "" : url.toString();
if (urlStr.startsWith("file:/") && (
urlStr.endsWith(esPrefix + ".jar") ||
urlStr.matches("(.*)" + esPrefix + "(-)?((alpha|beta|rc)[0-9]+)?(-SNAPSHOT)?.jar")
urlStr.endsWith(opensearchPrefix + ".jar") ||
urlStr.matches("(.*)" + opensearchPrefix + "(-)?((alpha|beta|rc)[0-9]+)?(-SNAPSHOT)?.jar")
)) {
try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) {
Manifest manifest = jar.getManifest();
@ -112,7 +112,7 @@ public class Build {
throw new RuntimeException(e);
}
} else {
// not running from the official elasticsearch jar file (unit tests, IDE, uber client jar, shadiness)
// not running from the official opensearch jar file (unit tests, IDE, uber client jar, shadiness)
hash = "unknown";
date = "unknown";
version = Version.CURRENT.toString();
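For illustration, a minimal sketch of which jar URLs the renamed prefix accepts (the version value 1.0.0 is assumed in place of Version.CURRENT):
String opensearchPrefix = "opensearch-1.0.0"; // stands in for "opensearch-" + Version.CURRENT
// the plain release jar matches via endsWith
assert "file:/dist/opensearch-1.0.0.jar".endsWith(opensearchPrefix + ".jar");
// pre-release and snapshot jars match via the regex
assert "file:/dist/opensearch-1.0.0-rc1-SNAPSHOT.jar"
    .matches("(.*)" + opensearchPrefix + "(-)?((alpha|beta|rc)[0-9]+)?(-SNAPSHOT)?.jar");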

View File

@ -545,7 +545,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
/**
* Returns true iff this version is an alpha version
* Note: This has been introduced in elasticsearch version 5. Previous versions will never
* Note: This has been introduced in version 5 of the OpenSearch predecessor. Previous versions will never
* have an alpha version.
*/
public boolean isAlpha() {

View File

@ -117,7 +117,7 @@ public class NodeInfo extends BaseNodeResponse {
}
/**
* The current ES version
* The current OpenSearch version
*/
public Version getVersion() {
return version;

View File

@ -49,7 +49,7 @@ public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsR
/**
* Constructs a new request to update minimum compatible version settings for one or more indices
*
* @param versions a map from index name to elasticsearch version, oldest lucene segment version tuple
* @param versions a map from index name to opensearch version, oldest lucene segment version tuple
*/
public UpgradeSettingsRequest(Map<String, Tuple<Version, String>> versions) {
this.versions = versions;

View File

@ -182,7 +182,7 @@ public class GetResponse extends ActionResponse implements Iterable<DocumentFiel
* doesn't know. But before returning the result it will check that enough information was
* parsed to return a valid {@link GetResponse} instance and throws a {@link ParsingException}
* otherwise. This is the case when we get a 404 back, which can be parsed as a normal
* {@link GetResponse} with found set to false, or as an elasticsearch exception. The caller
* {@link GetResponse} with found set to false, or as an opensearch exception. The caller
* of this method needs a way to figure out whether we got back a valid get response, which
* can be done by catching ParsingException.
*
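A sketch of the calling pattern this javadoc describes (the XContentParser over the response body is assumed to be in scope):
GetResponse response;
try {
    response = GetResponse.fromXContent(parser); // a 404 body may parse as found == false
} catch (ParsingException e) {
    // the body was an opensearch exception rather than a valid get response
    response = null;
}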

View File

@ -309,7 +309,7 @@ final class Bootstrap {
builder.setSecureSettings(secureSettings);
}
return InternalSettingsPreparer.prepareEnvironment(builder.build(), Collections.emptyMap(), configPath,
// HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available
// HOSTNAME is set by opensearch-env and opensearch-env.bat so it is always available
() -> System.getenv("HOSTNAME"));
}

View File

@ -296,7 +296,7 @@ final class BootstrapChecks {
if (maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit) {
final String message = String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]",
"max file descriptors [%d] for opensearch process is too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit);
return BootstrapCheckResult.failure(message);
@ -317,7 +317,7 @@ final class BootstrapChecks {
@Override
public BootstrapCheckResult check(BootstrapContext context) {
if (BootstrapSettings.MEMORY_LOCK_SETTING.get(context.settings()) && !isMemoryLocked()) {
return BootstrapCheckResult.failure("memory locking requested for elasticsearch process but memory is not locked");
return BootstrapCheckResult.failure("memory locking requested for opensearch process but memory is not locked");
} else {
return BootstrapCheckResult.success();
}

View File

@ -48,7 +48,7 @@ class JNANatives {
// Set to true, in case native system call filter install was successful
static boolean LOCAL_SYSTEM_CALL_FILTER = false;
// Set to true, in case policy can be applied to all threads of the process (even existing ones)
// otherwise they are only inherited for new threads (ES app threads)
// otherwise they are only inherited for new threads (OpenSearch app threads)
static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false;
// set to the maximum number of threads that can be created for
// the user ID that owns the running OpenSearch process

View File

@ -67,7 +67,7 @@ class OpenSearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHand
logger.error(message, t);
Terminal.DEFAULT.errorPrintln(message);
t.printStackTrace(Terminal.DEFAULT.getErrorWriter());
// Without a final flush, the stacktrace may not be shown before ES exits
// Without a final flush, the stacktrace may not be shown before OpenSearch exits
Terminal.DEFAULT.flush();
}
@ -76,7 +76,7 @@ class OpenSearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHand
logger.error(message, t);
Terminal.DEFAULT.errorPrintln(message);
t.printStackTrace(Terminal.DEFAULT.getErrorWriter());
// Without a final flush, the stacktrace may not be shown if ES goes on to exit
// Without a final flush, the stacktrace may not be shown if OpenSearch goes on to exit
Terminal.DEFAULT.flush();
}

View File

@ -66,7 +66,7 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
* between security and ease of use:
* <ul>
* <li>Assigns file permissions to user-configurable paths that can
* be specified from the command-line or {@code elasticsearch.yml}.</li>
* be specified from the command-line or {@code opensearch.yml}.</li>
* <li>Allows for some contained usage of native code that would not
* otherwise be permitted.</li>
* </ul>
@ -85,7 +85,7 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
* permission, but there are extenuating circumstances.
* <p>
* Scripts (groovy) are assigned minimal permissions. This does not provide adequate
* sandboxing, as these scripts still have access to ES classes, and could
* sandboxing, as these scripts still have access to OpenSearch classes, and could
* modify members, etc that would cause bad things to happen later on their
* behalf (no package protections are yet in place, this would need some
* cleanups to the scripting apis). But still it can provide some defense for users
@ -94,7 +94,7 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
* <h2>Debugging Security</h2>
* A good place to start when there is a problem is to turn on security debugging:
* <pre>
* ES_JAVA_OPTS="-Djava.security.debug=access,failure" bin/elasticsearch
* ES_JAVA_OPTS="-Djava.security.debug=access,failure" bin/opensearch
* </pre>
* <p>
* When running tests you have to pass it to the test runner like this:
@ -134,7 +134,7 @@ final class Security {
}
/**
* Return a map from codebase name to codebase url of jar codebases used by ES core.
* Return a map from codebase name to codebase url of jar codebases used by OpenSearch core.
*/
@SuppressForbidden(reason = "find URL path")
static Map<String, URL> getCodebaseJarMap(Set<URL> urls) {

View File

@ -54,7 +54,7 @@ import java.util.Map;
* On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
* is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation
* here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method
* which will at least protect elasticsearch application threads.
* which will at least protect opensearch application threads.
* <p>
* Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls:
* <ul>
@ -128,7 +128,7 @@ final class SystemCallFilter {
static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17
static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17
/** otherwise, we can use prctl(2), which will at least protect ES application threads */
/** otherwise, we can use prctl(2), which will at least protect OpenSearch application threads */
static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5
static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5
static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23

View File

@ -99,7 +99,7 @@ public abstract class EnvironmentAwareCommand extends Command {
}
return InternalSettingsPreparer.prepareEnvironment(baseSettings, settings,
getConfigPath(esPathConf),
// HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available
// HOSTNAME is set by opensearch-env and opensearch-env.bat so it is always available
() -> System.getenv("HOSTNAME"));
}

View File

@ -29,7 +29,7 @@ import java.security.GeneralSecurityException;
import java.util.Arrays;
/**
* An {@link org.opensearch.cli.EnvironmentAwareCommand} that needs to access the elasticsearch keystore, possibly
* An {@link org.opensearch.cli.EnvironmentAwareCommand} that needs to access the opensearch keystore, possibly
* decrypting it if it is password protected.
*/
public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand {
@ -52,7 +52,7 @@ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand {
protected static SecureString readPassword(Terminal terminal, boolean withVerification) throws UserException {
final char[] passwordArray;
if (withVerification) {
passwordArray = terminal.readSecret("Enter new password for the elasticsearch keystore (empty for no password): ",
passwordArray = terminal.readSecret("Enter new password for the opensearch keystore (empty for no password): ",
MAX_PASSPHRASE_LENGTH);
char[] passwordVerification = terminal.readSecret("Enter same password again: ",
MAX_PASSPHRASE_LENGTH);
@ -61,7 +61,7 @@ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand {
}
Arrays.fill(passwordVerification, '\u0000');
} else {
passwordArray = terminal.readSecret("Enter password for the elasticsearch keystore : ");
passwordArray = terminal.readSecret("Enter password for the opensearch keystore : ");
}
return new SecureString(passwordArray);
}

View File

@ -31,7 +31,7 @@ import java.util.function.Predicate;
public class ClusterName implements Writeable {
public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "elasticsearch", (s) -> {
public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> {
if (s.isEmpty()) {
throw new IllegalArgumentException("[cluster.name] must not be empty");
}
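A usage sketch of the renamed default (setting values hypothetical):
Settings settings = Settings.builder().put("cluster.name", "my-cluster").build();
ClusterName explicit = ClusterName.CLUSTER_NAME_SETTING.get(settings);        // "my-cluster"
ClusterName defaulted = ClusterName.CLUSTER_NAME_SETTING.get(Settings.EMPTY); // falls back to "opensearch"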

View File

@ -37,7 +37,7 @@ public class DetachClusterCommand extends OpenSearchNodeCommand {
"You should only run this tool if you have permanently lost all of the\n" +
"master-eligible nodes in this cluster and you cannot restore the cluster\n" +
"from a snapshot, or you have already unsafely bootstrapped a new cluster\n" +
"by running `elasticsearch-node unsafe-bootstrap` on a master-eligible\n" +
"by running `opensearch-node unsafe-bootstrap` on a master-eligible\n" +
"node that belonged to the same cluster as this node. This tool can cause\n" +
"arbitrary data loss and its use should be your last resort.\n" +
"\n" +

View File

@ -261,7 +261,7 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecut
/**
* Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata
* will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index
* will not be created with a newer version of opensearch as well as that all indices are newer or equal to the minimum index
* compatibility version.
* @see Version#minimumIndexCompatibilityVersion()
* @throws IllegalStateException if any index is incompatible with the given version

View File

@ -108,9 +108,9 @@ public class MetadataIndexUpgradeService {
}
/**
* Elasticsearch v6.0 no longer supports indices created pre v5.0. All indices
* Elasticsearch v6.0 (the OpenSearch predecessor) no longer supports indices created pre v5.0. All indices
* that were created before Elasticsearch v5.0 should be re-indexed in Elasticsearch 5.x
* before they can be opened by this version of elasticsearch.
* before they can be opened by this version of opensearch.
*/
private void checkSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) {
if (indexMetadata.getState() == IndexMetadata.State.OPEN && isSupportedVersion(indexMetadata,
@ -124,7 +124,7 @@ public class MetadataIndexUpgradeService {
}
/*
* Returns true if this index can be supported by the current version of elasticsearch
* Returns true if this index can be supported by the current version of opensearch
*/
private static boolean isSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) {
return indexMetadata.getCreationVersion().onOrAfter(minimumIndexCompatibilityVersion);

View File

@ -115,7 +115,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode}
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>
@ -132,7 +132,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode}
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>
@ -152,7 +152,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode}
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>
@ -174,7 +174,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode}.
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>

View File

@ -324,8 +324,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Returns one active replica shard for the given shard id or <code>null</code> if
* no active replica is found.
*
* Since replicas could possibly be on nodes with a older version of ES than
* the primary is, this will return replicas on the highest version of ES.
* Since replicas could possibly be on nodes with an older version of OpenSearch than
* the primary is, this will return replicas on the highest version of OpenSearch.
*
*/
public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) {

View File

@ -44,7 +44,7 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.threadpool.Scheduler;
import org.opensearch.threadpool.ThreadPool;
@ -79,7 +79,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
private volatile TimeValue slowTaskLoggingThreshold;
private volatile PrioritizedEsThreadPoolExecutor threadPoolExecutor;
private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor;
/**
* Those 3 state listeners are changing infrequently - CopyOnWriteArrayList is just fine
@ -133,7 +133,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
threadPoolExecutor = createThreadPoolExecutor();
}
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return OpenSearchExecutors.newSinglePrioritizing(
nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME,
daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME),

View File

@ -51,7 +51,7 @@ import org.opensearch.common.util.concurrent.CountDown;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.FutureUtils;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.discovery.Discovery;
import org.opensearch.node.Node;
@ -89,7 +89,7 @@ public class MasterService extends AbstractLifecycleComponent {
protected final ThreadPool threadPool;
private volatile PrioritizedEsThreadPoolExecutor threadPoolExecutor;
private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor;
private volatile Batcher taskBatcher;
public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
@ -121,7 +121,7 @@ public class MasterService extends AbstractLifecycleComponent {
taskBatcher = new Batcher(logger, threadPoolExecutor);
}
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return OpenSearchExecutors.newSinglePrioritizing(
nodeName + "/" + MASTER_UPDATE_THREAD_NAME,
daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME),
@ -132,7 +132,7 @@ public class MasterService extends AbstractLifecycleComponent {
@SuppressWarnings("unchecked")
class Batcher extends TaskBatcher {
Batcher(Logger logger, PrioritizedEsThreadPoolExecutor threadExecutor) {
Batcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor) {
super(logger, threadExecutor);
}

View File

@ -24,7 +24,7 @@ import org.opensearch.common.Nullable;
import org.opensearch.common.Priority;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import java.util.ArrayList;
import java.util.Collections;
@ -38,17 +38,17 @@ import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Batching support for {@link PrioritizedEsThreadPoolExecutor}
* Batching support for {@link PrioritizedOpenSearchThreadPoolExecutor}
* Tasks that share the same batching key are batched (see {@link BatchedTask#batchingKey})
*/
public abstract class TaskBatcher {
private final Logger logger;
private final PrioritizedEsThreadPoolExecutor threadExecutor;
private final PrioritizedOpenSearchThreadPoolExecutor threadExecutor;
// package visible for tests
final Map<Object, LinkedHashSet<BatchedTask>> tasksPerBatchingKey = new HashMap<>();
public TaskBatcher(Logger logger, PrioritizedEsThreadPoolExecutor threadExecutor) {
public TaskBatcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor) {
this.logger = logger;
this.threadExecutor = threadExecutor;
}

View File

@ -578,7 +578,7 @@ public class GeoUtils {
}
/**
* Checks that the precision is within range supported by elasticsearch - between 1 and 12
* Checks that the precision is within range supported by opensearch - between 1 and 12
*
* Returns the precision value if it is in the range and throws an IllegalArgumentException if it
* is outside the range.
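A minimal sketch of the described check, restated rather than copied from the method body:
public static int checkPrecisionRange(int precision) {
    if (precision < 1 || precision > 12) {
        throw new IllegalArgumentException(
            "Invalid geohash aggregation precision of " + precision + ". Must be between 1 and 12.");
    }
    return precision;
}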

View File

@ -729,7 +729,7 @@ class InjectorImpl implements Injector, Lookups {
<T> Provider<T> getProviderOrThrow(final Key<T> key, Errors errors) throws ErrorsException {
final InternalFactory<? extends T> factory = getInternalFactory(key, errors);
// ES: optimize for a common case of read only instance getting from the parent...
// OpenSearch: optimize for a common case of read only instance getting from the parent...
if (factory instanceof InternalFactory.Instance) {
return new Provider<T>() {
@Override

View File

@ -41,7 +41,7 @@ public class ModulesBuilder implements Iterable<Module> {
public Injector createInjector() {
Injector injector = Guice.createInjector(modules);
((InjectorImpl) injector).clearCache();
// in ES, we always create all instances as if they are eager singletons
// in OpenSearch, we always create all instances as if they are eager singletons
// this allows for considerable memory savings (no need to store construction info) as well as cycles
((InjectorImpl) injector).readOnlyAllSingletons();
return injector;

View File

@ -51,7 +51,7 @@ import java.util.stream.Stream;
* <li>level - INFO, WARN etc</li>
* <li>component - logger name, most of the times class name</li>
* <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li>
* <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml</li>
* <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in opensearch.yml</li>
* <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
* once clusterStateUpdate is first received</li>
* <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly

View File

@ -138,7 +138,7 @@ public class LogConfigurator {
/**
* Sets the node name. This is called before logging is configured if the
* node name is set in elasticsearch.yml. Otherwise it is called as soon
* node name is set in opensearch.yml. Otherwise it is called as soon
* as the node id is available.
*/
public static void setNodeName(String nodeName) {

View File

@ -32,7 +32,7 @@ import org.apache.lucene.util.SetOnce;
* Converts {@code %node_name} in log4j patterns into the current node name.
* We can't use a system property for this because the node name system
* property is only set if the node name is explicitly defined in
* elasticsearch.yml.
* opensearch.yml.
*/
@Plugin(category = PatternConverter.CATEGORY, name = "NodeNamePatternConverter")
@ConverterKeys({"node_name"})

View File

@ -67,7 +67,7 @@ public final class OpenSearchDirectoryReader extends FilterDirectoryReader {
* expose the given shard Id.
*
* @param reader the reader to wrap
* @param shardId the shard ID to expose via the elasticsearch internal reader wrappers.
* @param shardId the shard ID to expose via the opensearch internal reader wrappers.
*/
public static OpenSearchDirectoryReader wrap(DirectoryReader reader, ShardId shardId) throws IOException {
return new OpenSearchDirectoryReader(reader, new SubReaderWrapper(shardId), shardId);

View File

@ -51,7 +51,7 @@ public abstract class BaseKeyStoreCommand extends KeyStoreAwareCommand {
throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found at [" +
KeyStoreWrapper.keystorePath(env.configFile()) + "]. Use 'create' command to create one.");
} else if (options.has(forceOption) == false) {
if (terminal.promptYesNo("The elasticsearch keystore does not exist. Do you want to create it?", false) == false) {
if (terminal.promptYesNo("The opensearch keystore does not exist. Do you want to create it?", false) == false) {
terminal.println("Exiting without creating keystore.");
return;
}

View File

@ -111,7 +111,7 @@ public class KeyStoreWrapper implements SecureSettings {
"~!@#$%^&*-_=+?").toCharArray();
/** The name of the keystore file to read and write. */
private static final String KEYSTORE_FILENAME = "elasticsearch.keystore";
private static final String KEYSTORE_FILENAME = "opensearch.keystore";
/** The version of the metadata written before the keystore data. */
static final int FORMAT_VERSION = 4;
@ -517,7 +517,7 @@ public class KeyStoreWrapper implements SecureSettings {
} catch (final AccessDeniedException e) {
final String message = String.format(
Locale.ROOT,
"unable to create temporary keystore at [%s], write permissions required for [%s] or run [elasticsearch-keystore upgrade]",
"unable to create temporary keystore at [%s], write permissions required for [%s] or run [opensearch-keystore upgrade]",
configDir.resolve(tmpFile),
configDir);
throw new UserException(ExitCodes.CONFIG, message, e);

View File

@ -86,7 +86,7 @@ public abstract class SecureSetting<T> extends Setting<T> {
if (secureSettings == null || secureSettings.getSettingNames().contains(getKey()) == false) {
if (super.exists(settings)) {
throw new IllegalArgumentException("Setting [" + getKey() + "] is a secure setting" +
" and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml");
" and must be stored inside the Elasticsearch keystore, but was found inside opensearch.yml");
}
return getFallback(settings);
}
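For example, a keystore-only setting is declared roughly like this (names hypothetical); putting its value in opensearch.yml instead of the keystore trips the exception above:
public static final Setting<SecureString> API_PASSWORD =
    SecureSetting.secureString("myplugin.api.password", null); // keystore-only, no yml fallback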

View File

@ -65,7 +65,7 @@ import java.util.stream.Stream;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some (SettingsProperty.Dynamic) can be modified at run time using the API.
* All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
* All settings inside opensearch or in any of the plugins should use this type-safe and generic settings infrastructure
* together with {@link AbstractScopedSettings}. This class contains several utility methods that makes it straight forward
* to add settings for the majority of the cases. For instance a simple boolean settings can be defined like this:
* <pre>{@code
@ -518,7 +518,7 @@ public class Setting<T> implements ToXContentObject {
SecureSettings secureSettings = settings.getSecureSettings();
if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) {
throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" +
" and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore");
" and must be stored inside opensearch.yml, but was found inside the Elasticsearch keystore");
}
return settings.get(getKey(), defaultValue.apply(settings));
}
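For instance, a simple dynamic boolean setting and its non-secure read look roughly like this (names hypothetical):
public static final Setting<Boolean> MY_FEATURE_ENABLED =
    Setting.boolSetting("my.feature.enabled", false, Setting.Property.NodeScope, Setting.Property.Dynamic);
boolean enabled = MY_FEATURE_ENABLED.get(settings); // read from opensearch.yml or cluster settings, never the keystore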

View File

@ -108,7 +108,7 @@ public class SettingsModule implements Module {
builder.append(System.lineSeparator());
int count = 0;
for (String word : ("Since elasticsearch 5.x index level settings can NOT be set on the nodes configuration like " +
"the elasticsearch.yaml, in system properties or command line arguments." +
"the opensearch.yaml, in system properties or command line arguments." +
"In order to upgrade all indices the settings must be updated via the /${index}/_settings API. " +
"Unless all settings are dynamic all indices must be closed in order to apply the upgrade" +
"Indices created in the future should use index templates to set default values."

View File

@ -166,7 +166,7 @@ public abstract class BaseFuture<V> implements Future<V> {
// call stack, so we rethrow it.
// we want to notify the listeners we have with errors as well, as it breaks
// how we work in ES in terms of using assertions
// how we work in OpenSearch in terms of using assertions
// if (throwable instanceof Error) {
// throw (Error) throwable;
// }

View File

@ -96,29 +96,29 @@ public class OpenSearchExecutors {
return NODE_PROCESSORS_SETTING.get(settings);
}
public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory,
ThreadContext contextHolder, ScheduledExecutorService timer) {
return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer);
public static PrioritizedOpenSearchThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory,
ThreadContext contextHolder, ScheduledExecutorService timer) {
return new PrioritizedOpenSearchThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer);
}
public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit,
ThreadFactory threadFactory, ThreadContext contextHolder) {
public static OpenSearchThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit,
ThreadFactory threadFactory, ThreadContext contextHolder) {
ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>();
EsThreadPoolExecutor executor =
new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder);
OpenSearchThreadPoolExecutor executor =
new OpenSearchThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder);
queue.executor = executor;
return executor;
}
public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity,
ThreadFactory threadFactory, ThreadContext contextHolder) {
public static OpenSearchThreadPoolExecutor newFixed(String name, int size, int queueCapacity,
ThreadFactory threadFactory, ThreadContext contextHolder) {
BlockingQueue<Runnable> queue;
if (queueCapacity < 0) {
queue = ConcurrentCollections.newBlockingQueue();
} else {
queue = new SizeBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
}
return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
return new OpenSearchThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
queue, threadFactory, new OpenSearchAbortPolicy(), contextHolder);
}
@ -131,16 +131,16 @@ public class OpenSearchExecutors {
* @param maxQueueSize maximum queue size that the queue can be adjusted to
* @param frameSize number of tasks during which stats are collected before adjusting queue size
*/
public static EsThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize,
int maxQueueSize, int frameSize, TimeValue targetedResponseTime,
ThreadFactory threadFactory, ThreadContext contextHolder) {
public static OpenSearchThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize,
int maxQueueSize, int frameSize, TimeValue targetedResponseTime,
ThreadFactory threadFactory, ThreadContext contextHolder) {
if (initialQueueCapacity <= 0) {
throw new IllegalArgumentException("initial queue capacity for [" + name + "] executor must be positive, got: " +
initialQueueCapacity);
}
ResizableBlockingQueue<Runnable> queue =
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), initialQueueCapacity);
return new QueueResizingEsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
return new QueueResizingOpenSearchThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
queue, minQueueSize, maxQueueSize, TimedRunnable::new, frameSize, targetedResponseTime, threadFactory,
new OpenSearchAbortPolicy(), contextHolder);
}
@ -249,7 +249,7 @@ public class OpenSearchExecutors {
public static String threadName(final String nodeName, final String namePrefix) {
// TODO missing node names should only be allowed in tests
return "elasticsearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? "" : "]") + "[" + namePrefix + "]";
return "opensearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? "" : "]") + "[" + namePrefix + "]";
}
public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) {
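A usage sketch of the renamed factory methods (pool sizing values hypothetical):
ThreadContext context = new ThreadContext(Settings.EMPTY);
OpenSearchThreadPoolExecutor fixed = OpenSearchExecutors.newFixed(
    "demo", 4, 100, OpenSearchExecutors.daemonThreadFactory("demo"), context);
try {
    // worker threads carry the renamed prefix, along the lines of opensearch[demo][T#1]
    fixed.execute(() -> System.out.println(Thread.currentThread().getName()));
} finally {
    fixed.shutdown();
}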

View File

@ -30,7 +30,7 @@ import java.util.stream.Stream;
/**
* An extension to thread pool executor, allowing (in the future) to add specific additional stats to it.
*/
public class EsThreadPoolExecutor extends ThreadPoolExecutor {
public class OpenSearchThreadPoolExecutor extends ThreadPoolExecutor {
private final ThreadContext contextHolder;
private volatile ShutdownListener listener;
@ -45,15 +45,15 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
return name;
}
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {
OpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {
this(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new OpenSearchAbortPolicy(), contextHolder);
}
@SuppressForbidden(reason = "properly rethrowing errors, see OpenSearchExecutors.rethrowErrors")
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) {
OpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
this.name = name;
this.contextHolder = contextHolder;

View File

@ -41,15 +41,15 @@ import java.util.concurrent.atomic.AtomicLong;
* <p>
* Note, if two tasks have the same priority, the first to arrive will be executed first (FIFO style).
*/
public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor {
public class PrioritizedOpenSearchThreadPoolExecutor extends OpenSearchThreadPoolExecutor {
private static final TimeValue NO_WAIT_TIME_VALUE = TimeValue.timeValueMillis(0);
private final AtomicLong insertionOrder = new AtomicLong();
private final Queue<Runnable> current = ConcurrentCollections.newQueue();
private final ScheduledExecutorService timer;
public PrioritizedEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) {
public PrioritizedOpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) {
super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<>(), threadFactory, contextHolder);
this.timer = timer;
}
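A sketch of submitting prioritized work (the in-scope context and scheduler, and the PrioritizedRunnable.wrap helper, are assumptions):
PrioritizedOpenSearchThreadPoolExecutor prioritized = OpenSearchExecutors.newSinglePrioritizing(
    "demo-prio", OpenSearchExecutors.daemonThreadFactory("demo"), context, scheduler);
prioritized.execute(PrioritizedRunnable.wrap(() -> System.out.println("urgent"), Priority.URGENT));
prioritized.execute(PrioritizedRunnable.wrap(() -> System.out.println("normal"), Priority.NORMAL));
// URGENT tasks jump ahead of queued NORMAL ones; equal priorities run FIFO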

View File

@ -36,12 +36,12 @@ import java.util.function.Function;
* An extension to thread pool executor, which automatically adjusts the queue size of the
* {@code ResizableBlockingQueue} according to Little's Law.
*/
public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecutor {
public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchThreadPoolExecutor {
// This is a random starting point alpha. TODO: revisit this with actual testing and/or make it configurable
public static double EWMA_ALPHA = 0.3;
private static final Logger logger = LogManager.getLogger(QueueResizingEsThreadPoolExecutor.class);
private static final Logger logger = LogManager.getLogger(QueueResizingOpenSearchThreadPoolExecutor.class);
// The amount the queue size is adjusted by for each calculation
private static final int QUEUE_ADJUSTMENT_AMOUNT = 50;
@ -58,11 +58,11 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
private long startNs;
QueueResizingEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ResizableBlockingQueue<Runnable> workQueue, int minQueueSize, int maxQueueSize,
Function<Runnable, WrappedRunnable> runnableWrapper, final int tasksPerFrame,
TimeValue targetedResponseTime, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) {
QueueResizingOpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ResizableBlockingQueue<Runnable> workQueue, int minQueueSize, int maxQueueSize,
Function<Runnable, WrappedRunnable> runnableWrapper, final int tasksPerFrame,
TimeValue targetedResponseTime, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) {
super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit,
workQueue, threadFactory, handler, contextHolder);
this.runnableWrapper = runnableWrapper;
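The resize follows Little's Law, L = lambda * W; a worked example with assumed numbers:
double lambda = 800.0;     // measured throughput, tasks per second
double target = 0.2;       // targetedResponseTime in seconds
int optimalQueue = (int) (lambda * target); // L = 800 * 0.2 = 160
// the executor nudges the current capacity toward this value in
// QUEUE_ADJUSTMENT_AMOUNT (50) steps, bounded by minQueueSize and maxQueueSize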

View File

@ -62,7 +62,7 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING
* have out of the box support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread
* that it is forking from.". Network calls will also preserve the senders headers automatically.
* <p>
* Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every elasticsearch thread is managed by
* Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every opensearch thread is managed by
* a thread pool or executor being responsible for stashing and restoring the threads context. For instance if a network request is
* received, all headers are deserialized from the network and directly added as the headers of the threads {@link ThreadContext}
* (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently active on this thread the network code
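A sketch of the stash/restore pattern the javadoc describes (threadPool assumed in scope):
ThreadContext threadContext = threadPool.getThreadContext();
try (ThreadContext.StoredContext restore = threadContext.stashContext()) {
    // a fresh default context is active here, so headers of the incoming request do not leak
    threadContext.putHeader("my-header", "value");
} // closing the StoredContext restores the original headers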

View File

@ -72,7 +72,7 @@ public class XContentOpenSearchExtension implements XContentBuilderExtension {
public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() {
Map<Class<?>, XContentBuilder.Writer> writers = new HashMap<>();
// Fully-qualified here to reduce ambiguity around our (ES') Version class
// Fully-qualified here to reduce ambiguity around our (OpenSearch's) Version class
writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));

View File

@ -43,7 +43,7 @@ import org.opensearch.common.util.CollectionUtils;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.ConcurrentCollections;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.KeyedLock;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.discovery.SeedHostsProvider;
@ -108,7 +108,7 @@ public class UnicastZenPing implements ZenPing {
private final SeedHostsProvider hostsProvider;
protected final EsThreadPoolExecutor unicastZenPingExecutorService;
protected final OpenSearchThreadPoolExecutor unicastZenPingExecutorService;
private final TimeValue resolveTimeout;

View File

@ -46,7 +46,7 @@ import org.opensearch.common.collect.Tuple;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.discovery.DiscoveryModule;
import org.opensearch.env.NodeMetadata;
@ -334,7 +334,7 @@ public class GatewayMetaState implements Closeable {
static final String THREAD_NAME = "AsyncLucenePersistedState#updateTask";
private final EsThreadPoolExecutor threadPoolExecutor;
private final OpenSearchThreadPoolExecutor threadPoolExecutor;
private final PersistedState persistedState;
boolean newCurrentTermQueued = false;

View File

@ -125,7 +125,7 @@ public class TransportNodesListGatewayStartedShards extends
if (request.getCustomDataPath() != null) {
customDataPath = request.getCustomDataPath();
} else {
// TODO: Fallback for BWC with older ES versions. Remove once request.getCustomDataPath() always returns non-null
// TODO: Fallback for BWC with older OpenSearch versions. Remove once request.getCustomDataPath() always returns non-null
final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex());
if (metadata != null) {
customDataPath = new IndexSettings(metadata, settings).customDataPath();

View File

@ -29,7 +29,7 @@ import org.opensearch.common.unit.ByteSizeUnit;
import org.opensearch.common.unit.ByteSizeValue;
/**
* A shard in elasticsearch is a Lucene index, and a Lucene index is broken
* A shard in opensearch is a Lucene index, and a Lucene index is broken
* down into segments. Segments are internal storage elements in the index
* where the index data is stored, and are immutable up to delete markers.
* Segments are, periodically, merged into larger segments to keep the

View File

@ -48,7 +48,7 @@ import java.util.function.Supplier;
* doing range searches. Therefore the {@code _seq_no} field is stored both
* as a numeric doc value and as numeric indexed field.
*
* This mapper also manages the primary term field, which has no ES named
* This mapper also manages the primary term field, which has no OpenSearch named
* equivalent. The primary term is only used during collision after receiving
* identical seq# values for two document copies. The primary term is stored as
* a doc value field without being indexed, since it is only intended for use

View File

@ -46,7 +46,7 @@ import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.index.mapper.ObjectMapper;
import org.opensearch.index.search.ESToParentBlockJoinQuery;
import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
import org.opensearch.index.search.NestedHelper;
import org.opensearch.search.SearchHit;
import org.opensearch.search.fetch.subphase.InnerHitsContext;
@ -240,7 +240,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
public static String scoreModeAsString(ScoreMode scoreMode) {
if (scoreMode == ScoreMode.Total) {
// Lucene uses 'total' but 'sum' is more consistent with other elasticsearch APIs
// Lucene uses 'total' but 'sum' is more consistent with other opensearch APIs
return "sum";
} else {
return scoreMode.name().toLowerCase(Locale.ROOT);
@ -306,7 +306,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
innerQuery = Queries.filtered(innerQuery, nestedObjectMapper.nestedTypeFilter());
}
return new ESToParentBlockJoinQuery(innerQuery, parentFilter, scoreMode,
return new OpenSearchToParentBlockJoinQuery(innerQuery, parentFilter, scoreMode,
objectMapper == null ? null : objectMapper.fullPath());
}
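A sketch of a nested query that rewrites to the renamed OpenSearchToParentBlockJoinQuery (field names hypothetical):
NestedQueryBuilder nested = QueryBuilders.nestedQuery(
    "comments", QueryBuilders.termQuery("comments.author", "kimchy"), ScoreMode.Total);
// serialized with "score_mode": "sum", since scoreModeAsString maps ScoreMode.Total to "sum"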

View File

@ -139,7 +139,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
/** Whether or not the lenient flag has been set or not */
private boolean lenientSet = false;
/** Further search settings needed by the ES specific query string parser only. */
/** Further search settings needed by the OpenSearch specific query string parser only. */
private Settings settings = new Settings();
/** Construct a new simple query with this query string. */

View File

@ -233,7 +233,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
builder.setSlop(slop);
/*
* Lucene SpanNearQuery throws exceptions for certain use cases like adding gap to a
* unordered SpanNearQuery. Should ES have the same checks or wrap those thrown exceptions?
* unordered SpanNearQuery. Should OpenSearch have the same checks or wrap those thrown exceptions?
*/
if (isGap) {
int gap = ((SpanGapQueryBuilder) queryBuilder).width();

View File

@ -86,8 +86,8 @@ public final class NestedHelper {
.map(BooleanClause::getQuery)
.anyMatch(this::mightMatchNestedDocs);
}
} else if (query instanceof ESToParentBlockJoinQuery) {
return ((ESToParentBlockJoinQuery) query).getPath() != null;
} else if (query instanceof OpenSearchToParentBlockJoinQuery) {
return ((OpenSearchToParentBlockJoinQuery) query).getPath() != null;
} else {
return true;
}

View File

@ -649,7 +649,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
this.tracked = in.readBoolean();
} else {
// Every in-sync shard copy is also tracked (see invariant). This was the case even in earlier ES versions.
// Every in-sync shard copy is also tracked (see invariant). This was the case even in earlier OpenSearch versions.
// Non in-sync shard copies might be tracked or not. As this information here is only serialized during relocation hand-off,
// after which replica recoveries cannot complete anymore (i.e. they cannot move from in-sync == false to in-sync == true),
// we can treat non in-sync replica shard copies as untracked. They will go through a fresh recovery against the new

View File

@ -148,7 +148,7 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<T
if (request.getCustomDataPath() != null) {
customDataPath = request.getCustomDataPath();
} else {
// TODO: Fallback for BWC with older ES versions. Remove this once request.getCustomDataPath() always returns non-null
// TODO: Fallback for BWC with older predecessor (ES) versions. Remove this once request.getCustomDataPath() always returns non-null
if (indexService != null) {
customDataPath = indexService.getIndexSettings().customDataPath();
} else {

View File

@ -510,7 +510,7 @@ public final class RepositoryData {
builder.field(MIN_VERSION, SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION.toString());
builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers);
} else if (shouldWriteShardGens) {
// Add min version field to make it impossible for older ES versions to deserialize this object
// Add min version field to make it impossible for older OpenSearch versions to deserialize this object
builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString());
}
builder.endObject();

View File

@ -59,7 +59,7 @@
* | | in the repository
* | |- meta-20131010.dat - JSON Serialized {@link org.opensearch.cluster.metadata.IndexMetadata } for index "foo"
* | |- 0/ - data for shard "0" of index "foo"
* | | |- __1 \ (files with numeric names were created by older ES versions)
* | | |- __1 \ (files with numeric names were created by older versions of the ES predecessor)
* | | |- __2 |
* | | |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
* | | |- __1gbJy18wS_2kv1qI7FgKuQ |
@ -70,7 +70,7 @@
* | | |- snap-20131011.dat - SMILE serialized {@link org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
* | | | snapshot "20131011"
* | | |- index-123 - SMILE serialized {@link org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} for
* | | | the shard (files with numeric suffixes were created by older versions, newer ES versions use a uuid
* | | | the shard (files with numeric suffixes were created by older versions; newer OpenSearch versions use a uuid
* | | | suffix instead)
* | |
* | |- 1/ - data for shard "1" of index "foo"


@ -95,8 +95,8 @@ public class RestAllocationAction extends AbstractCatAction {
final Table table = new Table();
table.startHeaders();
table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node");
table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by ES indices");
table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)");
table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by OpenSearch indices");
table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just OpenSearch)");
table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available");
table.addCell("disk.total", "alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes");
table.addCell("disk.percent", "alias:dp,diskPercent;text-align:right;desc:percent disk used");


@ -61,7 +61,7 @@ public class ReverseNestedAggregator extends BucketsAggregator implements Single
@Override
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
// In ES if parent is deleted, then also the children are deleted, so the child docs this agg receives
// In OpenSearch, if a parent is deleted then its children are deleted as well, so the child docs this agg receives
// must belong to parent docs that are alive. For this reason acceptedDocs can be null here.
final BitSet parentDocs = parentBitsetProducer.getBitSet(ctx);
if (parentDocs == null) {


@ -95,7 +95,7 @@ public class DiversifiedBytesHashSamplerAggregator extends SamplerAggregator {
}
// This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource
// a lookup from OpenSearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {
private SortedBinaryDocValues values;


@ -101,7 +101,7 @@ public class DiversifiedMapSamplerAggregator extends SamplerAggregator {
}
// This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource
// a lookup from OpenSearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {
private SortedBinaryDocValues values;


@ -88,7 +88,7 @@ public class DiversifiedNumericSamplerAggregator extends SamplerAggregator {
}
// This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource
// a lookup from OpenSearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {
private SortedNumericDocValues values;


@ -90,7 +90,7 @@ public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator {
}
// This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource
// a lookup from OpenSearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {


@ -183,7 +183,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
}
TopFieldDocs mergedTopDocs = (TopFieldDocs) manager.reduce(collectors);
// Lucene sets shards indexes during merging of topDocs from different collectors
// We need to reset shard index; ES will set shard index later during reduce stage
// We need to reset the shard index; OpenSearch will set it later during the reduce stage
for (ScoreDoc scoreDoc : mergedTopDocs.scoreDocs) {
scoreDoc.shardIndex = -1;
}


@ -51,7 +51,7 @@ import org.opensearch.common.Booleans;
import org.opensearch.common.CheckedConsumer;
import org.opensearch.common.lucene.Lucene;
import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
import org.opensearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor;
import org.opensearch.index.IndexSortConfig;
import org.opensearch.index.mapper.DateFieldMapper.DateFieldType;
import org.opensearch.index.mapper.MappedFieldType;
@ -303,8 +303,8 @@ public class QueryPhase {
}
ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH);
if (executor instanceof QueueResizingEsThreadPoolExecutor) {
QueueResizingEsThreadPoolExecutor rExecutor = (QueueResizingEsThreadPoolExecutor) executor;
if (executor instanceof QueueResizingOpenSearchThreadPoolExecutor) {
QueueResizingOpenSearchThreadPoolExecutor rExecutor = (QueueResizingOpenSearchThreadPoolExecutor) executor;
queryResult.nodeQueueSize(rExecutor.getCurrentQueueSize());
queryResult.serviceTimeEWMA((long) rExecutor.getTaskExecutionEWMA());
}
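Only the resizing executor exposes queue-size and task-EWMA statistics, so callers feature-detect it exactly as above. A minimal sketch of that detection; the -1 sentinel is an assumption, not QueryPhase behavior.

import java.util.concurrent.ExecutorService;

import org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor;

final class QueueStatsSketch {
    /** Returns the current search queue size, or -1 for pools without statistics. */
    static int currentQueueSize(ExecutorService executor) {
        if (executor instanceof QueueResizingOpenSearchThreadPoolExecutor) {
            return ((QueueResizingOpenSearchThreadPoolExecutor) executor).getCurrentQueueSize();
        }
        return -1; // assumption: sentinel for non-resizing executors
    }
}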


@ -32,8 +32,8 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.SizeValue;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.util.concurrent.XRejectedExecutionHandler;
import org.opensearch.common.xcontent.ToXContentFragment;
@ -606,7 +606,7 @@ public class ThreadPool implements ReportingService<ThreadPoolInfo>, Scheduler {
public final Info info;
ExecutorHolder(ExecutorService executor, Info info) {
assert executor instanceof EsThreadPoolExecutor || executor == DIRECT_EXECUTOR;
assert executor instanceof OpenSearchThreadPoolExecutor || executor == DIRECT_EXECUTOR;
this.executor = executor;
this.info = info;
}


@ -39,7 +39,7 @@ import org.opensearch.cluster.service.FakeThreadPoolMasterService;
import org.elasticsearch.cluster.service.MasterService;
import org.opensearch.cluster.service.MasterService;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.node.Node;
import org.opensearch.test.ClusterServiceUtils;
import org.opensearch.test.OpenSearchTestCase;
@ -72,7 +72,7 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas
final ClusterApplierService clusterApplierService = new ClusterApplierService("test", settings, clusterSettings, threadPool) {
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor("mock-executor", deterministicTaskQueue, threadPool);
}
};


@ -35,12 +35,12 @@ import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.collect.Map;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.RootObjectMapper;
import org.opensearch.index.Index;
import org.opensearch.index.mapper.ContentPath;
import org.opensearch.index.mapper.Mapper;
import org.opensearch.index.mapper.Mapping;
import org.opensearch.index.mapper.MetadataFieldMapper;
import org.opensearch.index.mapper.RootObjectMapper;
import org.opensearch.test.OpenSearchTestCase;
import java.util.List;


@ -27,7 +27,7 @@ import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings;
import org.elasticsearch.monitor.StatusInfo;
import org.opensearch.monitor.StatusInfo;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.test.transport.MockTransport;
import org.opensearch.transport.ConnectTransportException;
@ -58,8 +58,8 @@ import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.emptySet;
import static org.opensearch.cluster.coordination.PreVoteCollector.REQUEST_PRE_VOTE_ACTION_NAME;
import static org.elasticsearch.monitor.StatusInfo.Status.HEALTHY;
import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY;
import static org.opensearch.monitor.StatusInfo.Status.HEALTHY;
import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY;
import static org.opensearch.node.Node.NODE_NAME_SETTING;
import static org.opensearch.threadpool.ThreadPool.Names.SAME;
import static org.hamcrest.Matchers.equalTo;


@ -26,7 +26,7 @@ import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException;
import org.opensearch.common.Priority;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.junit.Before;
import org.opensearch.cluster.service.TaskBatcher;
@ -61,7 +61,7 @@ public class TaskBatcherTests extends TaskExecutorTests {
class TestTaskBatcher extends TaskBatcher {
TestTaskBatcher(Logger logger, PrioritizedEsThreadPoolExecutor threadExecutor) {
TestTaskBatcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor) {
super(logger, threadExecutor);
}


@ -26,7 +26,7 @@ import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.cluster.service.SourcePrioritizedRunnable;
import org.opensearch.threadpool.TestThreadPool;
@ -50,7 +50,7 @@ import static org.hamcrest.core.Is.is;
public class TaskExecutorTests extends OpenSearchTestCase {
protected static ThreadPool threadPool;
protected PrioritizedEsThreadPoolExecutor threadExecutor;
protected PrioritizedOpenSearchThreadPoolExecutor threadExecutor;
@BeforeClass
public static void createThreadPool() {


@ -24,7 +24,7 @@ import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.threadpool.TestThreadPool;
@ -77,10 +77,10 @@ public class IndexShardOperationPermitsTests extends OpenSearchTestCase {
.put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize)
.put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize)
.build());
assertThat(threadPool.executor(ThreadPool.Names.WRITE), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getCorePoolSize(), equalTo(writeThreadPoolSize));
assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getMaximumPoolSize(), equalTo(writeThreadPoolSize));
assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getQueue().remainingCapacity(),
assertThat(threadPool.executor(ThreadPool.Names.WRITE), instanceOf(OpenSearchThreadPoolExecutor.class));
assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getCorePoolSize(), equalTo(writeThreadPoolSize));
assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getMaximumPoolSize(), equalTo(writeThreadPoolSize));
assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getQueue().remainingCapacity(),
equalTo(writeThreadPoolQueueSize));
}


@ -28,7 +28,7 @@ import org.opensearch.common.breaker.NoopCircuitBreaker;
import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.index.shard.ShardId;
import org.opensearch.search.DocValueFormat;
import org.opensearch.search.SearchShardTarget;
@ -54,7 +54,7 @@ public class QueryPhaseResultConsumerTests extends OpenSearchTestCase {
private SearchPhaseController searchPhaseController;
private ThreadPool threadPool;
private EsThreadPoolExecutor executor;
private OpenSearchThreadPoolExecutor executor;
@Before
public void setup() {


@ -44,7 +44,7 @@ import org.opensearch.common.text.Text;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.AtomicArray;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.index.shard.ShardId;
import org.opensearch.search.DocValueFormat;
import org.opensearch.search.SearchHit;
@ -102,7 +102,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class SearchPhaseControllerTests extends OpenSearchTestCase {
private ThreadPool threadPool;
private EsThreadPoolExecutor fixedExecutor;
private OpenSearchThreadPoolExecutor fixedExecutor;
private SearchPhaseController searchPhaseController;
private List<Boolean> reductions;


@ -51,7 +51,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
}
public void testFixedForcedExecution() throws Exception {
EsThreadPoolExecutor executor =
OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), 1, 1, OpenSearchExecutors.daemonThreadFactory("test"), threadContext);
final CountDownLatch wait = new CountDownLatch(1);
@ -114,7 +114,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
}
public void testFixedRejected() throws Exception {
EsThreadPoolExecutor executor =
OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), 1, 1, OpenSearchExecutors.daemonThreadFactory("test"), threadContext);
final CountDownLatch wait = new CountDownLatch(1);
@ -247,7 +247,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
int queue = between(0, 100);
int actions = queue + pool;
final CountDownLatch latch = new CountDownLatch(1);
EsThreadPoolExecutor executor =
OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext);
try {
for (int i = 0; i < actions; i++) {
@ -279,7 +279,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
assertFalse("Thread pool registering as terminated when it isn't", e.isExecutorShutdown());
String message = e.getMessage();
assertThat(message, containsString("of dummy runnable"));
assertThat(message, containsString("on EsThreadPoolExecutor[name = " + getName()));
assertThat(message, containsString("on OpenSearchThreadPoolExecutor[name = " + getName()));
assertThat(message, containsString("queue capacity = " + queue));
assertThat(message, containsString("[Running"));
/*
@ -319,7 +319,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
assertTrue("Thread pool not registering as terminated when it is", e.isExecutorShutdown());
String message = e.getMessage();
assertThat(message, containsString("of dummy runnable"));
assertThat(message, containsString("on EsThreadPoolExecutor[name = " + getName()));
assertThat(message, containsString("on OpenSearchThreadPoolExecutor[name = " + getName()));
assertThat(message, containsString("queue capacity = " + queue));
assertThat(message, containsString("[Terminated"));
assertThat(message, containsString("active threads = 0"));
@ -337,7 +337,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
threadContext.putHeader("foo", "bar");
final Integer one = Integer.valueOf(1);
threadContext.putTransient("foo", one);
EsThreadPoolExecutor executor =
OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext);
try {
executor.execute(() -> {
@ -368,7 +368,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
int queue = between(0, 100);
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch executed = new CountDownLatch(1);
EsThreadPoolExecutor executor =
OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext);
try {
Runnable r = () -> {
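For reference, a self-contained sketch of the renamed executor type in use, following the newFixed call shape above; the names, sizes, and empty ThreadContext are illustrative.

import java.util.concurrent.TimeUnit;

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;

final class FixedExecutorSketch {
    public static void main(String[] args) throws InterruptedException {
        OpenSearchThreadPoolExecutor executor = OpenSearchExecutors.newFixed(
            "sketch", 1, 1, OpenSearchExecutors.daemonThreadFactory("sketch"),
            new ThreadContext(Settings.EMPTY));
        try {
            executor.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        } catch (OpenSearchRejectedExecutionException e) {
            // Thrown once the single thread is busy and the one queue slot is taken.
            System.out.println("rejected: " + e.getMessage());
        } finally {
            executor.shutdown();
            executor.awaitTermination(10, TimeUnit.SECONDS);
        }
    }
}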


@ -190,7 +190,7 @@ public class PrioritizedExecutorsTests extends OpenSearchTestCase {
public void testTimeout() throws Exception {
ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor(OpenSearchExecutors.daemonThreadFactory(getTestName()));
PrioritizedEsThreadPoolExecutor executor =
PrioritizedOpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newSinglePrioritizing(getName(), OpenSearchExecutors.daemonThreadFactory(getTestName()), holder, timer);
final CountDownLatch invoked = new CountDownLatch(1);
final CountDownLatch block = new CountDownLatch(1);
@ -211,7 +211,7 @@ public class PrioritizedExecutorsTests extends OpenSearchTestCase {
}
});
invoked.await();
PrioritizedEsThreadPoolExecutor.Pending[] pending = executor.getPending();
PrioritizedOpenSearchThreadPoolExecutor.Pending[] pending = executor.getPending();
assertThat(pending.length, equalTo(1));
assertThat(pending[0].task.toString(), equalTo("the blocking"));
assertThat(pending[0].executing, equalTo(true));
@ -254,7 +254,7 @@ public class PrioritizedExecutorsTests extends OpenSearchTestCase {
ThreadPool threadPool = new TestThreadPool("test");
final ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler();
final AtomicBoolean timeoutCalled = new AtomicBoolean();
PrioritizedEsThreadPoolExecutor executor =
PrioritizedOpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newSinglePrioritizing(getName(), OpenSearchExecutors.daemonThreadFactory(getTestName()), holder, timer);
final CountDownLatch invoked = new CountDownLatch(1);
executor.execute(new Runnable() {
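For reference, a hedged sketch of the getPending() inspection above; it assumes the context-holder argument of newSinglePrioritizing is a ThreadContext and uses a plain JDK scheduler as the timer.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import org.opensearch.common.Priority;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedRunnable;
import org.opensearch.common.util.concurrent.ThreadContext;

final class PendingInspectionSketch {
    public static void main(String[] args) {
        ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
        PrioritizedOpenSearchThreadPoolExecutor executor = OpenSearchExecutors.newSinglePrioritizing(
            "sketch", OpenSearchExecutors.daemonThreadFactory("sketch"),
            new ThreadContext(Settings.EMPTY), timer);
        executor.execute(new PrioritizedRunnable(Priority.HIGH) {
            @Override
            public void run() {
                // HIGH-priority tasks run ahead of queued NORMAL-priority ones.
            }
        });
        // Pending tasks can be inspected together with their execution state.
        for (PrioritizedOpenSearchThreadPoolExecutor.Pending pending : executor.getPending()) {
            System.out.println(pending.task + " executing=" + pending.executing);
        }
        executor.shutdown();
        timer.shutdown();
    }
}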


@ -44,8 +44,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int threads = randomIntBetween(1, 3);
int measureWindow = 3;
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, 1000, fastWrapper(),
measureWindow, TimeValue.timeValueMillis(1), OpenSearchExecutors.daemonThreadFactory("queuetest"),
@ -75,8 +75,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int threads = randomIntBetween(1, 10);
int measureWindow = randomIntBetween(100, 200);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, 3000, fastWrapper(),
measureWindow, TimeValue.timeValueMillis(1), OpenSearchExecutors.daemonThreadFactory("queuetest"),
@ -103,8 +103,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int threads = randomIntBetween(1, 10);
int measureWindow = randomIntBetween(100, 200);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, 3000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -131,8 +131,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int measureWindow = randomIntBetween(10, 100);
int min = randomIntBetween(4981, 4999);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, min, 100000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -160,8 +160,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int measureWindow = randomIntBetween(10, 100);
int max = randomIntBetween(5010, 5024);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, max, fastWrapper(), measureWindow, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -185,8 +185,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
100);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", 1, 1, 1000,
TimeUnit.MILLISECONDS, queue, 10, 200, fastWrapper(), 10, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -226,8 +226,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
100);
QueueResizingEsThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor(
QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", 1, 1, 1000,
TimeUnit.MILLISECONDS, queue, 10, 200, exceptionalWrapper(), 10, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -258,7 +258,7 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
}
/** Execute a blank task {@code times} times for the executor */
private void executeTask(QueueResizingEsThreadPoolExecutor executor, int times) {
private void executeTask(QueueResizingOpenSearchThreadPoolExecutor executor, int times) {
logger.info("--> executing a task [{}] times", times);
for (int i = 0; i < times; i++) {
executor.execute(() -> {});


@ -144,7 +144,7 @@ import org.opensearch.common.transport.TransportAddress;
import org.opensearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.opensearch.common.util.PageCacheRecycler;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.env.Environment;
@ -1414,7 +1414,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
clusterService = new ClusterService(settings, clusterSettings, masterService,
new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) {
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue, threadPool);
}


@ -21,7 +21,7 @@ package org.opensearch.threadpool;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import java.util.HashMap;
import java.util.Map;
@ -79,23 +79,23 @@ public class ScalingThreadPoolTests extends OpenSearchThreadPoolTestCase {
runScalingThreadPoolTest(builder.build(), (clusterSettings, threadPool) -> {
final Executor executor = threadPool.executor(threadPoolName);
assertThat(executor, instanceOf(EsThreadPoolExecutor.class));
final EsThreadPoolExecutor esThreadPoolExecutor = (EsThreadPoolExecutor)executor;
assertThat(executor, instanceOf(OpenSearchThreadPoolExecutor.class));
final OpenSearchThreadPoolExecutor openSearchThreadPoolExecutor = (OpenSearchThreadPoolExecutor) executor;
final ThreadPool.Info info = info(threadPool, threadPoolName);
assertThat(info.getName(), equalTo(threadPoolName));
assertThat(info.getThreadPoolType(), equalTo(ThreadPool.ThreadPoolType.SCALING));
assertThat(info.getKeepAlive().seconds(), equalTo(keepAlive));
assertThat(esThreadPoolExecutor.getKeepAliveTime(TimeUnit.SECONDS), equalTo(keepAlive));
assertThat(openSearchThreadPoolExecutor.getKeepAliveTime(TimeUnit.SECONDS), equalTo(keepAlive));
assertNull(info.getQueueSize());
assertThat(esThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(Integer.MAX_VALUE));
assertThat(openSearchThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(Integer.MAX_VALUE));
assertThat(info.getMin(), equalTo(core));
assertThat(esThreadPoolExecutor.getCorePoolSize(), equalTo(core));
assertThat(openSearchThreadPoolExecutor.getCorePoolSize(), equalTo(core));
assertThat(info.getMax(), equalTo(expectedMax));
assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax));
assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax));
});
if (processorsUsed > availableProcessors) {
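For comparison with the assertions above, a standalone sketch of a scaling pool's defaults, using the newScaling signature seen elsewhere in this change; the sizes and keep-alive are arbitrary.

import java.util.concurrent.TimeUnit;

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;

final class ScalingExecutorSketch {
    public static void main(String[] args) {
        OpenSearchThreadPoolExecutor executor = OpenSearchExecutors.newScaling(
            "sketch", 1, 4, 30, TimeUnit.SECONDS,
            OpenSearchExecutors.daemonThreadFactory("sketch"),
            new ThreadContext(Settings.EMPTY));
        // Scaling pools grow from core to max threads and keep an
        // effectively unbounded queue.
        System.out.println(executor.getCorePoolSize());              // 1
        System.out.println(executor.getMaximumPoolSize());           // 4
        System.out.println(executor.getQueue().remainingCapacity()); // Integer.MAX_VALUE
        executor.shutdown();
    }
}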


@ -22,7 +22,7 @@ package org.opensearch.threadpool;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.threadpool.ThreadPool.Names;
import java.lang.reflect.Field;
@ -105,16 +105,16 @@ public class UpdateThreadPoolSettingsTests extends OpenSearchThreadPoolTestCase
.put("thread_pool." + threadPoolName + ".size", expectedSize)
.build();
threadPool = new ThreadPool(nodeSettings);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(threadPool.executor(threadPoolName), instanceOf(OpenSearchThreadPoolExecutor.class));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize));
assertThat(threadPool.executor(threadPoolName), instanceOf(OpenSearchThreadPoolExecutor.class));
assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize));
assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize));
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize));
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
// keep alive does not apply to fixed thread pools
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L));
assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}
@ -139,7 +139,7 @@ public class UpdateThreadPoolSettingsTests extends OpenSearchThreadPoolTestCase
final long expectedKeepAlive = "generic".equals(threadPoolName) ? 30 : 300;
assertThat(info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class));
assertThat(threadPool.executor(threadPoolName), instanceOf(OpenSearchThreadPoolExecutor.class));
} finally {
terminateThreadPoolIfNeeded(threadPool);
}


@ -59,7 +59,7 @@ import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.MockBigArrays;
import org.opensearch.common.util.MockPageCacheRecycler;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.discovery.DiscoveryModule;
import org.opensearch.discovery.SeedHostsProvider;
import org.opensearch.env.NodeEnvironment;
@ -1366,7 +1366,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
}
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor(nodeName, deterministicTaskQueue, threadPool);
}


@ -19,16 +19,16 @@
package org.opensearch.cluster.coordination;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.threadpool.ThreadPool;
import java.util.concurrent.TimeUnit;
/**
* Mock single threaded {@link PrioritizedEsThreadPoolExecutor} based on {@link DeterministicTaskQueue},
* Mock single-threaded {@link PrioritizedOpenSearchThreadPoolExecutor} based on {@link DeterministicTaskQueue},
* simulating the behaviour of an executor returned by {@link OpenSearchExecutors#newSinglePrioritizing}.
*/
public class MockSinglePrioritizingExecutor extends PrioritizedEsThreadPoolExecutor {
public class MockSinglePrioritizingExecutor extends PrioritizedOpenSearchThreadPoolExecutor {
public MockSinglePrioritizingExecutor(String name, DeterministicTaskQueue deterministicTaskQueue, ThreadPool threadPool) {
super(name, 0, 1, 0L, TimeUnit.MILLISECONDS,


@ -29,7 +29,7 @@ import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.node.Node;
import org.opensearch.threadpool.ThreadPool;
@ -61,8 +61,8 @@ public class FakeThreadPoolMasterService extends MasterService {
}
@Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 1, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory(name),
protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new PrioritizedOpenSearchThreadPoolExecutor(name, 1, 1, 1, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory(name),
null, null) {
@Override


@ -24,7 +24,7 @@ import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.test.OpenSearchTestCase;
import org.junit.AfterClass;
@ -38,7 +38,7 @@ import java.util.concurrent.CountDownLatch;
*/
public class OpenSearchIndexInputTestCase extends OpenSearchTestCase {
private static EsThreadPoolExecutor executor;
private static OpenSearchThreadPoolExecutor executor;
@BeforeClass
public static void createExecutor() {


@ -1856,7 +1856,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file");
if (rarely()) {
// Sometimes adjust the minimum search thread pool size, causing
// QueueResizingEsThreadPoolExecutor to be used instead of a regular
// QueueResizingOpenSearchThreadPoolExecutor to be used instead of a regular
// fixed thread pool
builder.put("thread_pool.search.min_queue_size", 100);
}
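The same toggle works on a standalone ThreadPool. A hedged sketch, assuming only the min_queue_size setting above plus a node name, which ThreadPool requires:

import java.util.concurrent.TimeUnit;

import org.opensearch.common.settings.Settings;
import org.opensearch.threadpool.ThreadPool;

final class AutoQueueToggleSketch {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
            .put("node.name", "sketch") // assumption: ThreadPool requires a node name
            .put("thread_pool.search.min_queue_size", 100)
            .build();
        ThreadPool threadPool = new ThreadPool(nodeSettings);
        // With a minimum queue size configured, the search pool is backed by
        // the queue-resizing executor instead of a plain fixed pool.
        System.out.println(threadPool.executor(ThreadPool.Names.SEARCH).getClass().getSimpleName());
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    }
}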


@ -19,7 +19,7 @@
package org.opensearch.cluster.coordination;
import org.opensearch.common.Priority;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedRunnable;
import org.opensearch.test.OpenSearchTestCase;
@ -29,7 +29,7 @@ public class MockSinglePrioritizingExecutorTests extends OpenSearchTestCase {
public void testPrioritizedEsThreadPoolExecutor() {
final DeterministicTaskQueue taskQueue = DeterministicTaskQueueTests.newTaskQueue();
final PrioritizedEsThreadPoolExecutor executor = new MockSinglePrioritizingExecutor("test", taskQueue, taskQueue.getThreadPool());
final PrioritizedOpenSearchThreadPoolExecutor executor =
    new MockSinglePrioritizingExecutor("test", taskQueue, taskQueue.getThreadPool());
final AtomicBoolean called1 = new AtomicBoolean();
final AtomicBoolean called2 = new AtomicBoolean();
executor.execute(new PrioritizedRunnable(Priority.NORMAL) {