[Rename] Second pass clean up of imports and variable names in server module (#382)

This commit cleans up imports, variable names, comments, and other misc usages
of ES with the new OpenSearch name.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
This commit is contained in:
Nick Knize 2021-03-18 12:01:05 -05:00 committed by Nick Knize
parent b5e00e1e58
commit f75803e1aa
87 changed files with 222 additions and 222 deletions

View File

@ -256,8 +256,8 @@ public class RemoteScrollableHitSourceTests extends OpenSearchTestCase {
assertEquals("87A7NvevQxSrEwMbtRCecg", r.getFailures().get(0).getNodeId()); assertEquals("87A7NvevQxSrEwMbtRCecg", r.getFailures().get(0).getNodeId());
assertThat(r.getFailures().get(0).getReason(), instanceOf(OpenSearchRejectedExecutionException.class)); assertThat(r.getFailures().get(0).getReason(), instanceOf(OpenSearchRejectedExecutionException.class));
assertEquals("rejected execution of org.opensearch.transport.TransportService$5@52d06af2 on " assertEquals("rejected execution of org.opensearch.transport.TransportService$5@52d06af2 on "
+ "EsThreadPoolExecutor[search, queue capacity = 1000, org.opensearch.common.util.concurrent." + "OpenSearchThreadPoolExecutor[search, queue capacity = 1000, org.opensearch.common.util.concurrent."
+ "EsThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, " + "OpenSearchThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, "
+ "completed tasks = 4182]]", r.getFailures().get(0).getReason().getMessage()); + "completed tasks = 4182]]", r.getFailures().get(0).getReason().getMessage());
assertThat(r.getHits(), hasSize(1)); assertThat(r.getHits(), hasSize(1));
assertEquals("test", r.getHits().get(0).getIndex()); assertEquals("test", r.getHits().get(0).getIndex());

View File

@ -23,8 +23,8 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
@ -70,7 +70,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionErrorOnFixedESThreadPoolExecutor() throws InterruptedException { public void testExecutionErrorOnFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1, final OpenSearchThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1,
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext()); OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try { try {
checkExecutionError(getExecuteRunner(fixedExecutor)); checkExecutionError(getExecuteRunner(fixedExecutor));
@ -81,7 +81,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionErrorOnScalingESThreadPoolExecutor() throws InterruptedException { public void testExecutionErrorOnScalingESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1, final OpenSearchThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1,
10, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext()); 10, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try { try {
checkExecutionError(getExecuteRunner(scalingExecutor)); checkExecutionError(getExecuteRunner(scalingExecutor));
@ -92,7 +92,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionErrorOnAutoQueueFixedESThreadPoolExecutor() throws InterruptedException { public void testExecutionErrorOnAutoQueueFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1, final OpenSearchThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1,
1, 1, 1, TimeValue.timeValueSeconds(10), OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext()); 1, 1, 1, TimeValue.timeValueSeconds(10), OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try { try {
checkExecutionError(getExecuteRunner(autoQueueFixedExecutor)); checkExecutionError(getExecuteRunner(autoQueueFixedExecutor));
@ -103,7 +103,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionErrorOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException { public void testExecutionErrorOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException {
final PrioritizedEsThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test", final PrioritizedOpenSearchThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test",
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler()); OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler());
try { try {
checkExecutionError(getExecuteRunner(prioritizedExecutor)); checkExecutionError(getExecuteRunner(prioritizedExecutor));
@ -179,7 +179,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionExceptionOnFixedESThreadPoolExecutor() throws InterruptedException { public void testExecutionExceptionOnFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1, final OpenSearchThreadPoolExecutor fixedExecutor = OpenSearchExecutors.newFixed("test", 1, 1,
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext()); OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try { try {
checkExecutionException(getExecuteRunner(fixedExecutor), true); checkExecutionException(getExecuteRunner(fixedExecutor), true);
@ -190,7 +190,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionExceptionOnScalingESThreadPoolExecutor() throws InterruptedException { public void testExecutionExceptionOnScalingESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1, final OpenSearchThreadPoolExecutor scalingExecutor = OpenSearchExecutors.newScaling("test", 1, 1,
10, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext()); 10, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try { try {
checkExecutionException(getExecuteRunner(scalingExecutor), true); checkExecutionException(getExecuteRunner(scalingExecutor), true);
@ -201,7 +201,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionExceptionOnAutoQueueFixedESThreadPoolExecutor() throws InterruptedException { public void testExecutionExceptionOnAutoQueueFixedESThreadPoolExecutor() throws InterruptedException {
final EsThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1, final OpenSearchThreadPoolExecutor autoQueueFixedExecutor = OpenSearchExecutors.newAutoQueueFixed("test", 1, 1,
1, 1, 1, TimeValue.timeValueSeconds(10), OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext()); 1, 1, 1, TimeValue.timeValueSeconds(10), OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext());
try { try {
// fixed_auto_queue_size wraps stuff into TimedRunnable, which is an AbstractRunnable // fixed_auto_queue_size wraps stuff into TimedRunnable, which is an AbstractRunnable
@ -213,7 +213,7 @@ public class EvilThreadPoolTests extends OpenSearchTestCase {
} }
public void testExecutionExceptionOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException { public void testExecutionExceptionOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException {
final PrioritizedEsThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test", final PrioritizedOpenSearchThreadPoolExecutor prioritizedExecutor = OpenSearchExecutors.newSinglePrioritizing("test",
OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler()); OpenSearchExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler());
try { try {
checkExecutionException(getExecuteRunner(prioritizedExecutor), true); checkExecutionException(getExecuteRunner(prioritizedExecutor), true);

View File

@ -236,7 +236,7 @@ public class Packages {
return sh.runIgnoreExitCode("service opensearch start"); return sh.runIgnoreExitCode("service opensearch start");
} }
public static void assertElasticsearchStarted(Shell sh, Installation installation) throws Exception { public static void assertOpenSearchStarted(Shell sh, Installation installation) throws Exception {
waitForOpenSearch(installation); waitForOpenSearch(installation);
if (isSystemd()) { if (isSystemd()) {
@ -261,7 +261,7 @@ public class Packages {
} else { } else {
sh.run("service opensearch restart"); sh.run("service opensearch restart");
} }
assertElasticsearchStarted(sh, installation); assertOpenSearchStarted(sh, installation);
} }
/** /**

View File

@ -32,7 +32,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.search.spans.SpanTermQuery;
import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.lucene.search.function.FunctionScoreQuery;
import org.opensearch.index.search.ESToParentBlockJoinQuery; import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Collection;
@ -85,8 +85,8 @@ public class CustomFieldQuery extends FieldQuery {
for (Term term : synQuery.getTerms()) { for (Term term : synQuery.getTerms()) {
flatten(new TermQuery(term), reader, flatQueries, boost); flatten(new TermQuery(term), reader, flatQueries, boost);
} }
} else if (sourceQuery instanceof ESToParentBlockJoinQuery) { } else if (sourceQuery instanceof OpenSearchToParentBlockJoinQuery) {
Query childQuery = ((ESToParentBlockJoinQuery) sourceQuery).getChildQuery(); Query childQuery = ((OpenSearchToParentBlockJoinQuery) sourceQuery).getChildQuery();
if (childQuery != null) { if (childQuery != null) {
flatten(childQuery, reader, flatQueries, boost); flatten(childQuery, reader, flatQueries, boost);
} }

View File

@ -95,12 +95,12 @@ public class Build {
// these are parsed at startup, and we require that we are able to recognize the values passed in by the startup scripts // these are parsed at startup, and we require that we are able to recognize the values passed in by the startup scripts
type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"), true); type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"), true);
final String esPrefix = "elasticsearch-" + Version.CURRENT; final String opensearchPrefix = "opensearch-" + Version.CURRENT;
final URL url = getOpenSearchCodeSourceLocation(); final URL url = getOpenSearchCodeSourceLocation();
final String urlStr = url == null ? "" : url.toString(); final String urlStr = url == null ? "" : url.toString();
if (urlStr.startsWith("file:/") && ( if (urlStr.startsWith("file:/") && (
urlStr.endsWith(esPrefix + ".jar") || urlStr.endsWith(opensearchPrefix + ".jar") ||
urlStr.matches("(.*)" + esPrefix + "(-)?((alpha|beta|rc)[0-9]+)?(-SNAPSHOT)?.jar") urlStr.matches("(.*)" + opensearchPrefix + "(-)?((alpha|beta|rc)[0-9]+)?(-SNAPSHOT)?.jar")
)) { )) {
try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) { try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) {
Manifest manifest = jar.getManifest(); Manifest manifest = jar.getManifest();
@ -112,7 +112,7 @@ public class Build {
throw new RuntimeException(e); throw new RuntimeException(e);
} }
} else { } else {
// not running from the official elasticsearch jar file (unit tests, IDE, uber client jar, shadiness) // not running from the official opensearch jar file (unit tests, IDE, uber client jar, shadiness)
hash = "unknown"; hash = "unknown";
date = "unknown"; date = "unknown";
version = Version.CURRENT.toString(); version = Version.CURRENT.toString();

View File

@ -545,7 +545,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
/** /**
* Returns true iff this version is an alpha version * Returns true iff this version is an alpha version
* Note: This has been introduced in elasticsearch version 5. Previous versions will never * Note: This has been introduced in version 5 of the OpenSearch predecessor. Previous versions will never
* have an alpha version. * have an alpha version.
*/ */
public boolean isAlpha() { public boolean isAlpha() {

View File

@ -117,7 +117,7 @@ public class NodeInfo extends BaseNodeResponse {
} }
/** /**
* The current ES version * The current OpenSearch version
*/ */
public Version getVersion() { public Version getVersion() {
return version; return version;

View File

@ -49,7 +49,7 @@ public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsR
/** /**
* Constructs a new request to update minimum compatible version settings for one or more indices * Constructs a new request to update minimum compatible version settings for one or more indices
* *
* @param versions a map from index name to elasticsearch version, oldest lucene segment version tuple * @param versions a map from index name to opensearch version, oldest lucene segment version tuple
*/ */
public UpgradeSettingsRequest(Map<String, Tuple<Version, String>> versions) { public UpgradeSettingsRequest(Map<String, Tuple<Version, String>> versions) {
this.versions = versions; this.versions = versions;

View File

@ -182,7 +182,7 @@ public class GetResponse extends ActionResponse implements Iterable<DocumentFiel
* doesn't know. But before returning the result it will check that enough information were * doesn't know. But before returning the result it will check that enough information were
* parsed to return a valid {@link GetResponse} instance and throws a {@link ParsingException} * parsed to return a valid {@link GetResponse} instance and throws a {@link ParsingException}
* otherwise. This is the case when we get a 404 back, which can be parsed as a normal * otherwise. This is the case when we get a 404 back, which can be parsed as a normal
* {@link GetResponse} with found set to false, or as an elasticsearch exception. The caller * {@link GetResponse} with found set to false, or as an opensearch exception. The caller
* of this method needs a way to figure out whether we got back a valid get response, which * of this method needs a way to figure out whether we got back a valid get response, which
* can be done by catching ParsingException. * can be done by catching ParsingException.
* *

View File

@ -309,7 +309,7 @@ final class Bootstrap {
builder.setSecureSettings(secureSettings); builder.setSecureSettings(secureSettings);
} }
return InternalSettingsPreparer.prepareEnvironment(builder.build(), Collections.emptyMap(), configPath, return InternalSettingsPreparer.prepareEnvironment(builder.build(), Collections.emptyMap(), configPath,
// HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available // HOSTNAME is set by opensearch-env and opensearch-env.bat so it is always available
() -> System.getenv("HOSTNAME")); () -> System.getenv("HOSTNAME"));
} }

View File

@ -296,7 +296,7 @@ final class BootstrapChecks {
if (maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit) { if (maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit) {
final String message = String.format( final String message = String.format(
Locale.ROOT, Locale.ROOT,
"max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]", "max file descriptors [%d] for opensearch process is too low, increase to at least [%d]",
getMaxFileDescriptorCount(), getMaxFileDescriptorCount(),
limit); limit);
return BootstrapCheckResult.failure(message); return BootstrapCheckResult.failure(message);
@ -317,7 +317,7 @@ final class BootstrapChecks {
@Override @Override
public BootstrapCheckResult check(BootstrapContext context) { public BootstrapCheckResult check(BootstrapContext context) {
if (BootstrapSettings.MEMORY_LOCK_SETTING.get(context.settings()) && !isMemoryLocked()) { if (BootstrapSettings.MEMORY_LOCK_SETTING.get(context.settings()) && !isMemoryLocked()) {
return BootstrapCheckResult.failure("memory locking requested for elasticsearch process but memory is not locked"); return BootstrapCheckResult.failure("memory locking requested for opensearch process but memory is not locked");
} else { } else {
return BootstrapCheckResult.success(); return BootstrapCheckResult.success();
} }

View File

@ -48,7 +48,7 @@ class JNANatives {
// Set to true, in case native system call filter install was successful // Set to true, in case native system call filter install was successful
static boolean LOCAL_SYSTEM_CALL_FILTER = false; static boolean LOCAL_SYSTEM_CALL_FILTER = false;
// Set to true, in case policy can be applied to all threads of the process (even existing ones) // Set to true, in case policy can be applied to all threads of the process (even existing ones)
// otherwise they are only inherited for new threads (ES app threads) // otherwise they are only inherited for new threads (OpenSearch app threads)
static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false; static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false;
// set to the maximum number of threads that can be created for // set to the maximum number of threads that can be created for
// the user ID that owns the running OpenSearch process // the user ID that owns the running OpenSearch process

View File

@ -67,7 +67,7 @@ class OpenSearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHand
logger.error(message, t); logger.error(message, t);
Terminal.DEFAULT.errorPrintln(message); Terminal.DEFAULT.errorPrintln(message);
t.printStackTrace(Terminal.DEFAULT.getErrorWriter()); t.printStackTrace(Terminal.DEFAULT.getErrorWriter());
// Without a final flush, the stacktrace may not be shown before ES exits // Without a final flush, the stacktrace may not be shown before OpenSearch exits
Terminal.DEFAULT.flush(); Terminal.DEFAULT.flush();
} }
@ -76,7 +76,7 @@ class OpenSearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHand
logger.error(message, t); logger.error(message, t);
Terminal.DEFAULT.errorPrintln(message); Terminal.DEFAULT.errorPrintln(message);
t.printStackTrace(Terminal.DEFAULT.getErrorWriter()); t.printStackTrace(Terminal.DEFAULT.getErrorWriter());
// Without a final flush, the stacktrace may not be shown if ES goes on to exit // Without a final flush, the stacktrace may not be shown if OpenSearch goes on to exit
Terminal.DEFAULT.flush(); Terminal.DEFAULT.flush();
} }

View File

@ -66,7 +66,7 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
* between security and ease of use: * between security and ease of use:
* <ul> * <ul>
* <li>Assigns file permissions to user-configurable paths that can * <li>Assigns file permissions to user-configurable paths that can
* be specified from the command-line or {@code elasticsearch.yml}.</li> * be specified from the command-line or {@code opensearch.yml}.</li>
* <li>Allows for some contained usage of native code that would not * <li>Allows for some contained usage of native code that would not
* otherwise be permitted.</li> * otherwise be permitted.</li>
* </ul> * </ul>
@ -85,7 +85,7 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
* permission, but there are extenuating circumstances. * permission, but there are extenuating circumstances.
* <p> * <p>
* Scripts (groovy) are assigned minimal permissions. This does not provide adequate * Scripts (groovy) are assigned minimal permissions. This does not provide adequate
* sandboxing, as these scripts still have access to ES classes, and could * sandboxing, as these scripts still have access to OpenSearch classes, and could
* modify members, etc that would cause bad things to happen later on their * modify members, etc that would cause bad things to happen later on their
* behalf (no package protections are yet in place, this would need some * behalf (no package protections are yet in place, this would need some
* cleanups to the scripting apis). But still it can provide some defense for users * cleanups to the scripting apis). But still it can provide some defense for users
@ -94,7 +94,7 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
* <h2>Debugging Security</h2> * <h2>Debugging Security</h2>
* A good place to start when there is a problem is to turn on security debugging: * A good place to start when there is a problem is to turn on security debugging:
* <pre> * <pre>
* ES_JAVA_OPTS="-Djava.security.debug=access,failure" bin/elasticsearch * ES_JAVA_OPTS="-Djava.security.debug=access,failure" bin/opensearch
* </pre> * </pre>
* <p> * <p>
* When running tests you have to pass it to the test runner like this: * When running tests you have to pass it to the test runner like this:
@ -134,7 +134,7 @@ final class Security {
} }
/** /**
* Return a map from codebase name to codebase url of jar codebases used by ES core. * Return a map from codebase name to codebase url of jar codebases used by OpenSearch core.
*/ */
@SuppressForbidden(reason = "find URL path") @SuppressForbidden(reason = "find URL path")
static Map<String, URL> getCodebaseJarMap(Set<URL> urls) { static Map<String, URL> getCodebaseJarMap(Set<URL> urls) {

View File

@ -54,7 +54,7 @@ import java.util.Map;
* On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)} * On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
* is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation * is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation
* here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method * here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method
* which will at least protect elasticsearch application threads. * which will at least protect opensearch application threads.
* <p> * <p>
* Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls: * Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls:
* <ul> * <ul>
@ -128,7 +128,7 @@ final class SystemCallFilter {
static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17 static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17
static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17 static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17
/** otherwise, we can use prctl(2), which will at least protect ES application threads */ /** otherwise, we can use prctl(2), which will at least protect OpenSearch application threads */
static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5 static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5
static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5 static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5
static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23 static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23

View File

@ -99,7 +99,7 @@ public abstract class EnvironmentAwareCommand extends Command {
} }
return InternalSettingsPreparer.prepareEnvironment(baseSettings, settings, return InternalSettingsPreparer.prepareEnvironment(baseSettings, settings,
getConfigPath(esPathConf), getConfigPath(esPathConf),
// HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available // HOSTNAME is set by opensearch-env and opensearch-env.bat so it is always available
() -> System.getenv("HOSTNAME")); () -> System.getenv("HOSTNAME"));
} }

View File

@ -29,7 +29,7 @@ import java.security.GeneralSecurityException;
import java.util.Arrays; import java.util.Arrays;
/** /**
* An {@link org.opensearch.cli.EnvironmentAwareCommand} that needs to access the elasticsearch keystore, possibly * An {@link org.opensearch.cli.EnvironmentAwareCommand} that needs to access the opensearch keystore, possibly
* decrypting it if it is password protected. * decrypting it if it is password protected.
*/ */
public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand { public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand {
@ -52,7 +52,7 @@ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand {
protected static SecureString readPassword(Terminal terminal, boolean withVerification) throws UserException { protected static SecureString readPassword(Terminal terminal, boolean withVerification) throws UserException {
final char[] passwordArray; final char[] passwordArray;
if (withVerification) { if (withVerification) {
passwordArray = terminal.readSecret("Enter new password for the elasticsearch keystore (empty for no password): ", passwordArray = terminal.readSecret("Enter new password for the opensearch keystore (empty for no password): ",
MAX_PASSPHRASE_LENGTH); MAX_PASSPHRASE_LENGTH);
char[] passwordVerification = terminal.readSecret("Enter same password again: ", char[] passwordVerification = terminal.readSecret("Enter same password again: ",
MAX_PASSPHRASE_LENGTH); MAX_PASSPHRASE_LENGTH);
@ -61,7 +61,7 @@ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand {
} }
Arrays.fill(passwordVerification, '\u0000'); Arrays.fill(passwordVerification, '\u0000');
} else { } else {
passwordArray = terminal.readSecret("Enter password for the elasticsearch keystore : "); passwordArray = terminal.readSecret("Enter password for the opensearch keystore : ");
} }
return new SecureString(passwordArray); return new SecureString(passwordArray);
} }

View File

@ -31,7 +31,7 @@ import java.util.function.Predicate;
public class ClusterName implements Writeable { public class ClusterName implements Writeable {
public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "elasticsearch", (s) -> { public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> {
if (s.isEmpty()) { if (s.isEmpty()) {
throw new IllegalArgumentException("[cluster.name] must not be empty"); throw new IllegalArgumentException("[cluster.name] must not be empty");
} }

View File

@ -37,7 +37,7 @@ public class DetachClusterCommand extends OpenSearchNodeCommand {
"You should only run this tool if you have permanently lost all of the\n" + "You should only run this tool if you have permanently lost all of the\n" +
"master-eligible nodes in this cluster and you cannot restore the cluster\n" + "master-eligible nodes in this cluster and you cannot restore the cluster\n" +
"from a snapshot, or you have already unsafely bootstrapped a new cluster\n" + "from a snapshot, or you have already unsafely bootstrapped a new cluster\n" +
"by running `elasticsearch-node unsafe-bootstrap` on a master-eligible\n" + "by running `opensearch-node unsafe-bootstrap` on a master-eligible\n" +
"node that belonged to the same cluster as this node. This tool can cause\n" + "node that belonged to the same cluster as this node. This tool can cause\n" +
"arbitrary data loss and its use should be your last resort.\n" + "arbitrary data loss and its use should be your last resort.\n" +
"\n" + "\n" +

View File

@ -261,7 +261,7 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecut
/** /**
* Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata * Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata
* will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index * will not be created with a newer version of opensearch as well as that all indices are newer or equal to the minimum index
* compatibility version. * compatibility version.
* @see Version#minimumIndexCompatibilityVersion() * @see Version#minimumIndexCompatibilityVersion()
* @throws IllegalStateException if any index is incompatible with the given version * @throws IllegalStateException if any index is incompatible with the given version

View File

@ -108,9 +108,9 @@ public class MetadataIndexUpgradeService {
} }
/** /**
* Elasticsearch v6.0 no longer supports indices created pre v5.0. All indices * The OpenSearch predecessor, Elasticsearch, v6.0 no longer supports indices created pre v5.0. All indices
* that were created before Elasticsearch v5.0 should be re-indexed in Elasticsearch 5.x * that were created before Elasticsearch v5.0 should be re-indexed in Elasticsearch 5.x
* before they can be opened by this version of elasticsearch. * before they can be opened by this version of opensearch.
*/ */
private void checkSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) { private void checkSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) {
if (indexMetadata.getState() == IndexMetadata.State.OPEN && isSupportedVersion(indexMetadata, if (indexMetadata.getState() == IndexMetadata.State.OPEN && isSupportedVersion(indexMetadata,
@ -124,7 +124,7 @@ public class MetadataIndexUpgradeService {
} }
/* /*
* Returns true if this index can be supported by the current version of elasticsearch * Returns true if this index can be supported by the current version of opensearch
*/ */
private static boolean isSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) { private static boolean isSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) {
return indexMetadata.getCreationVersion().onOrAfter(minimumIndexCompatibilityVersion); return indexMetadata.getCreationVersion().onOrAfter(minimumIndexCompatibilityVersion);

View File

@ -115,7 +115,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode} * Creates a new {@link DiscoveryNode}
* <p> * <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated. * and updated.
* </p> * </p>
@ -132,7 +132,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode} * Creates a new {@link DiscoveryNode}
* <p> * <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated. * and updated.
* </p> * </p>
@ -152,7 +152,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode} * Creates a new {@link DiscoveryNode}
* <p> * <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated. * and updated.
* </p> * </p>
@ -174,7 +174,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
* Creates a new {@link DiscoveryNode}. * Creates a new {@link DiscoveryNode}.
* <p> * <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used * version. it corresponds to the minimum version this opensearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated. * and updated.
* </p> * </p>

View File

@ -324,8 +324,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Returns one active replica shard for the given shard id or <code>null</code> if * Returns one active replica shard for the given shard id or <code>null</code> if
* no active replica is found. * no active replica is found.
* *
* Since replicas could possibly be on nodes with a older version of ES than * Since replicas could possibly be on nodes with a older version of OpenSearch than
* the primary is, this will return replicas on the highest version of ES. * the primary is, this will return replicas on the highest version of OpenSearch.
* *
*/ */
public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) {

View File

@ -44,7 +44,7 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.Scheduler;
import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool;
@ -79,7 +79,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
private volatile TimeValue slowTaskLoggingThreshold; private volatile TimeValue slowTaskLoggingThreshold;
private volatile PrioritizedEsThreadPoolExecutor threadPoolExecutor; private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor;
/** /**
* Those 3 state listeners are changing infrequently - CopyOnWriteArrayList is just fine * Those 3 state listeners are changing infrequently - CopyOnWriteArrayList is just fine
@ -133,7 +133,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
threadPoolExecutor = createThreadPoolExecutor(); threadPoolExecutor = createThreadPoolExecutor();
} }
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return OpenSearchExecutors.newSinglePrioritizing( return OpenSearchExecutors.newSinglePrioritizing(
nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME, nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME,
daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME), daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME),

View File

@ -51,7 +51,7 @@ import org.opensearch.common.util.concurrent.CountDown;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.FutureUtils; import org.opensearch.common.util.concurrent.FutureUtils;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.discovery.Discovery; import org.opensearch.discovery.Discovery;
import org.opensearch.node.Node; import org.opensearch.node.Node;
@ -89,7 +89,7 @@ public class MasterService extends AbstractLifecycleComponent {
protected final ThreadPool threadPool; protected final ThreadPool threadPool;
private volatile PrioritizedEsThreadPoolExecutor threadPoolExecutor; private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor;
private volatile Batcher taskBatcher; private volatile Batcher taskBatcher;
public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
@ -121,7 +121,7 @@ public class MasterService extends AbstractLifecycleComponent {
taskBatcher = new Batcher(logger, threadPoolExecutor); taskBatcher = new Batcher(logger, threadPoolExecutor);
} }
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return OpenSearchExecutors.newSinglePrioritizing( return OpenSearchExecutors.newSinglePrioritizing(
nodeName + "/" + MASTER_UPDATE_THREAD_NAME, nodeName + "/" + MASTER_UPDATE_THREAD_NAME,
daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME), daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME),
@ -132,7 +132,7 @@ public class MasterService extends AbstractLifecycleComponent {
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
class Batcher extends TaskBatcher { class Batcher extends TaskBatcher {
Batcher(Logger logger, PrioritizedEsThreadPoolExecutor threadExecutor) { Batcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor) {
super(logger, threadExecutor); super(logger, threadExecutor);
} }

View File

@ -24,7 +24,7 @@ import org.opensearch.common.Nullable;
import org.opensearch.common.Priority; import org.opensearch.common.Priority;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
@ -38,17 +38,17 @@ import java.util.function.Function;
import java.util.stream.Collectors; import java.util.stream.Collectors;
/** /**
* Batching support for {@link PrioritizedEsThreadPoolExecutor} * Batching support for {@link PrioritizedOpenSearchThreadPoolExecutor}
* Tasks that share the same batching key are batched (see {@link BatchedTask#batchingKey}) * Tasks that share the same batching key are batched (see {@link BatchedTask#batchingKey})
*/ */
public abstract class TaskBatcher { public abstract class TaskBatcher {
private final Logger logger; private final Logger logger;
private final PrioritizedEsThreadPoolExecutor threadExecutor; private final PrioritizedOpenSearchThreadPoolExecutor threadExecutor;
// package visible for tests // package visible for tests
final Map<Object, LinkedHashSet<BatchedTask>> tasksPerBatchingKey = new HashMap<>(); final Map<Object, LinkedHashSet<BatchedTask>> tasksPerBatchingKey = new HashMap<>();
public TaskBatcher(Logger logger, PrioritizedEsThreadPoolExecutor threadExecutor) { public TaskBatcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor) {
this.logger = logger; this.logger = logger;
this.threadExecutor = threadExecutor; this.threadExecutor = threadExecutor;
} }

View File

@ -578,7 +578,7 @@ public class GeoUtils {
} }
/** /**
* Checks that the precision is within range supported by elasticsearch - between 1 and 12 * Checks that the precision is within range supported by opensearch - between 1 and 12
* *
* Returns the precision value if it is in the range and throws an IllegalArgumentException if it * Returns the precision value if it is in the range and throws an IllegalArgumentException if it
* is outside the range. * is outside the range.

View File

@ -729,7 +729,7 @@ class InjectorImpl implements Injector, Lookups {
<T> Provider<T> getProviderOrThrow(final Key<T> key, Errors errors) throws ErrorsException { <T> Provider<T> getProviderOrThrow(final Key<T> key, Errors errors) throws ErrorsException {
final InternalFactory<? extends T> factory = getInternalFactory(key, errors); final InternalFactory<? extends T> factory = getInternalFactory(key, errors);
// ES: optimize for a common case of read only instance getting from the parent... // OpenSearch: optimize for a common case of read only instance getting from the parent...
if (factory instanceof InternalFactory.Instance) { if (factory instanceof InternalFactory.Instance) {
return new Provider<T>() { return new Provider<T>() {
@Override @Override

View File

@ -41,7 +41,7 @@ public class ModulesBuilder implements Iterable<Module> {
public Injector createInjector() { public Injector createInjector() {
Injector injector = Guice.createInjector(modules); Injector injector = Guice.createInjector(modules);
((InjectorImpl) injector).clearCache(); ((InjectorImpl) injector).clearCache();
// in ES, we always create all instances as if they are eager singletons // in OpenSearch, we always create all instances as if they are eager singletons
// this allows for considerable memory savings (no need to store construction info) as well as cycles // this allows for considerable memory savings (no need to store construction info) as well as cycles
((InjectorImpl) injector).readOnlyAllSingletons(); ((InjectorImpl) injector).readOnlyAllSingletons();
return injector; return injector;

View File

@ -51,7 +51,7 @@ import java.util.stream.Stream;
* <li>level - INFO, WARN etc</li> * <li>level - INFO, WARN etc</li>
* <li>component - logger name, most of the times class name</li> * <li>component - logger name, most of the times class name</li>
* <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li> * <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li>
* <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml</li> * <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in opensearch.yml</li>
* <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present * <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
* once clusterStateUpdate is first received</li> * once clusterStateUpdate is first received</li>
* <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly * <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly

View File

@ -138,7 +138,7 @@ public class LogConfigurator {
/** /**
* Sets the node name. This is called before logging is configured if the * Sets the node name. This is called before logging is configured if the
* node name is set in elasticsearch.yml. Otherwise it is called as soon * node name is set in opensearch.yml. Otherwise it is called as soon
* as the node id is available. * as the node id is available.
*/ */
public static void setNodeName(String nodeName) { public static void setNodeName(String nodeName) {

View File

@ -32,7 +32,7 @@ import org.apache.lucene.util.SetOnce;
* Converts {@code %node_name} in log4j patterns into the current node name. * Converts {@code %node_name} in log4j patterns into the current node name.
* We can't use a system property for this because the node name system * We can't use a system property for this because the node name system
* property is only set if the node name is explicitly defined in * property is only set if the node name is explicitly defined in
* elasticsearch.yml. * opensearch.yml.
*/ */
@Plugin(category = PatternConverter.CATEGORY, name = "NodeNamePatternConverter") @Plugin(category = PatternConverter.CATEGORY, name = "NodeNamePatternConverter")
@ConverterKeys({"node_name"}) @ConverterKeys({"node_name"})

View File

@ -67,7 +67,7 @@ public final class OpenSearchDirectoryReader extends FilterDirectoryReader {
* expose the given shard Id. * expose the given shard Id.
* *
* @param reader the reader to wrap * @param reader the reader to wrap
* @param shardId the shard ID to expose via the elasticsearch internal reader wrappers. * @param shardId the shard ID to expose via the opensearch internal reader wrappers.
*/ */
public static OpenSearchDirectoryReader wrap(DirectoryReader reader, ShardId shardId) throws IOException { public static OpenSearchDirectoryReader wrap(DirectoryReader reader, ShardId shardId) throws IOException {
return new OpenSearchDirectoryReader(reader, new SubReaderWrapper(shardId), shardId); return new OpenSearchDirectoryReader(reader, new SubReaderWrapper(shardId), shardId);

View File

@ -51,7 +51,7 @@ public abstract class BaseKeyStoreCommand extends KeyStoreAwareCommand {
throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found at [" + throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found at [" +
KeyStoreWrapper.keystorePath(env.configFile()) + "]. Use 'create' command to create one."); KeyStoreWrapper.keystorePath(env.configFile()) + "]. Use 'create' command to create one.");
} else if (options.has(forceOption) == false) { } else if (options.has(forceOption) == false) {
if (terminal.promptYesNo("The elasticsearch keystore does not exist. Do you want to create it?", false) == false) { if (terminal.promptYesNo("The opensearch keystore does not exist. Do you want to create it?", false) == false) {
terminal.println("Exiting without creating keystore."); terminal.println("Exiting without creating keystore.");
return; return;
} }

View File

@ -111,7 +111,7 @@ public class KeyStoreWrapper implements SecureSettings {
"~!@#$%^&*-_=+?").toCharArray(); "~!@#$%^&*-_=+?").toCharArray();
/** The name of the keystore file to read and write. */ /** The name of the keystore file to read and write. */
private static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; private static final String KEYSTORE_FILENAME = "opensearch.keystore";
/** The version of the metadata written before the keystore data. */ /** The version of the metadata written before the keystore data. */
static final int FORMAT_VERSION = 4; static final int FORMAT_VERSION = 4;
@ -517,7 +517,7 @@ public class KeyStoreWrapper implements SecureSettings {
} catch (final AccessDeniedException e) { } catch (final AccessDeniedException e) {
final String message = String.format( final String message = String.format(
Locale.ROOT, Locale.ROOT,
"unable to create temporary keystore at [%s], write permissions required for [%s] or run [elasticsearch-keystore upgrade]", "unable to create temporary keystore at [%s], write permissions required for [%s] or run [opensearch-keystore upgrade]",
configDir.resolve(tmpFile), configDir.resolve(tmpFile),
configDir); configDir);
throw new UserException(ExitCodes.CONFIG, message, e); throw new UserException(ExitCodes.CONFIG, message, e);

View File

@ -86,7 +86,7 @@ public abstract class SecureSetting<T> extends Setting<T> {
if (secureSettings == null || secureSettings.getSettingNames().contains(getKey()) == false) { if (secureSettings == null || secureSettings.getSettingNames().contains(getKey()) == false) {
if (super.exists(settings)) { if (super.exists(settings)) {
throw new IllegalArgumentException("Setting [" + getKey() + "] is a secure setting" + throw new IllegalArgumentException("Setting [" + getKey() + "] is a secure setting" +
" and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml"); " and must be stored inside the Elasticsearch keystore, but was found inside opensearch.yml");
} }
return getFallback(settings); return getFallback(settings);
} }

View File

@ -65,7 +65,7 @@ import java.util.stream.Stream;
/** /**
* A setting. Encapsulates typical stuff like default value, parsing, and scope. * A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some (SettingsProperty.Dynamic) can by modified at run time using the API. * Some (SettingsProperty.Dynamic) can by modified at run time using the API.
* All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure * All settings inside opensearch or in any of the plugins should use this type-safe and generic settings infrastructure
* together with {@link AbstractScopedSettings}. This class contains several utility methods that makes it straight forward * together with {@link AbstractScopedSettings}. This class contains several utility methods that makes it straight forward
* to add settings for the majority of the cases. For instance a simple boolean settings can be defined like this: * to add settings for the majority of the cases. For instance a simple boolean settings can be defined like this:
* <pre>{@code * <pre>{@code
@ -518,7 +518,7 @@ public class Setting<T> implements ToXContentObject {
SecureSettings secureSettings = settings.getSecureSettings(); SecureSettings secureSettings = settings.getSecureSettings();
if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) { if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) {
throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" + throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" +
" and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore"); " and must be stored inside opensearch.yml, but was found inside the Elasticsearch keystore");
} }
return settings.get(getKey(), defaultValue.apply(settings)); return settings.get(getKey(), defaultValue.apply(settings));
} }

View File

@ -108,7 +108,7 @@ public class SettingsModule implements Module {
builder.append(System.lineSeparator()); builder.append(System.lineSeparator());
int count = 0; int count = 0;
for (String word : ("Since elasticsearch 5.x index level settings can NOT be set on the nodes configuration like " + for (String word : ("Since elasticsearch 5.x index level settings can NOT be set on the nodes configuration like " +
"the elasticsearch.yaml, in system properties or command line arguments." + "the opensearch.yaml, in system properties or command line arguments." +
"In order to upgrade all indices the settings must be updated via the /${index}/_settings API. " + "In order to upgrade all indices the settings must be updated via the /${index}/_settings API. " +
"Unless all settings are dynamic all indices must be closed in order to apply the upgrade" + "Unless all settings are dynamic all indices must be closed in order to apply the upgrade" +
"Indices created in the future should use index templates to set default values." "Indices created in the future should use index templates to set default values."

View File

@ -166,7 +166,7 @@ public abstract class BaseFuture<V> implements Future<V> {
// call stack, so we rethrow it. // call stack, so we rethrow it.
// we want to notify the listeners we have with errors as well, as it breaks // we want to notify the listeners we have with errors as well, as it breaks
// how we work in ES in terms of using assertions // how we work in OpenSearch in terms of using assertions
// if (throwable instanceof Error) { // if (throwable instanceof Error) {
// throw (Error) throwable; // throw (Error) throwable;
// } // }

View File

@ -96,29 +96,29 @@ public class OpenSearchExecutors {
return NODE_PROCESSORS_SETTING.get(settings); return NODE_PROCESSORS_SETTING.get(settings);
} }
public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, public static PrioritizedOpenSearchThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory,
ThreadContext contextHolder, ScheduledExecutorService timer) { ThreadContext contextHolder, ScheduledExecutorService timer) {
return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer); return new PrioritizedOpenSearchThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer);
} }
public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, public static OpenSearchThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit,
ThreadFactory threadFactory, ThreadContext contextHolder) { ThreadFactory threadFactory, ThreadContext contextHolder) {
ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>(); ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>();
EsThreadPoolExecutor executor = OpenSearchThreadPoolExecutor executor =
new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder); new OpenSearchThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder);
queue.executor = executor; queue.executor = executor;
return executor; return executor;
} }
public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, public static OpenSearchThreadPoolExecutor newFixed(String name, int size, int queueCapacity,
ThreadFactory threadFactory, ThreadContext contextHolder) { ThreadFactory threadFactory, ThreadContext contextHolder) {
BlockingQueue<Runnable> queue; BlockingQueue<Runnable> queue;
if (queueCapacity < 0) { if (queueCapacity < 0) {
queue = ConcurrentCollections.newBlockingQueue(); queue = ConcurrentCollections.newBlockingQueue();
} else { } else {
queue = new SizeBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity); queue = new SizeBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
} }
return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, return new OpenSearchThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
queue, threadFactory, new OpenSearchAbortPolicy(), contextHolder); queue, threadFactory, new OpenSearchAbortPolicy(), contextHolder);
} }
@ -131,16 +131,16 @@ public class OpenSearchExecutors {
* @param maxQueueSize maximum queue size that the queue can be adjusted to * @param maxQueueSize maximum queue size that the queue can be adjusted to
* @param frameSize number of tasks during which stats are collected before adjusting queue size * @param frameSize number of tasks during which stats are collected before adjusting queue size
*/ */
public static EsThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize, public static OpenSearchThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize,
int maxQueueSize, int frameSize, TimeValue targetedResponseTime, int maxQueueSize, int frameSize, TimeValue targetedResponseTime,
ThreadFactory threadFactory, ThreadContext contextHolder) { ThreadFactory threadFactory, ThreadContext contextHolder) {
if (initialQueueCapacity <= 0) { if (initialQueueCapacity <= 0) {
throw new IllegalArgumentException("initial queue capacity for [" + name + "] executor must be positive, got: " + throw new IllegalArgumentException("initial queue capacity for [" + name + "] executor must be positive, got: " +
initialQueueCapacity); initialQueueCapacity);
} }
ResizableBlockingQueue<Runnable> queue = ResizableBlockingQueue<Runnable> queue =
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), initialQueueCapacity); new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), initialQueueCapacity);
return new QueueResizingEsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, return new QueueResizingOpenSearchThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
queue, minQueueSize, maxQueueSize, TimedRunnable::new, frameSize, targetedResponseTime, threadFactory, queue, minQueueSize, maxQueueSize, TimedRunnable::new, frameSize, targetedResponseTime, threadFactory,
new OpenSearchAbortPolicy(), contextHolder); new OpenSearchAbortPolicy(), contextHolder);
} }
@ -249,7 +249,7 @@ public class OpenSearchExecutors {
public static String threadName(final String nodeName, final String namePrefix) { public static String threadName(final String nodeName, final String namePrefix) {
// TODO missing node names should only be allowed in tests // TODO missing node names should only be allowed in tests
return "elasticsearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? "" : "]") + "[" + namePrefix + "]"; return "opensearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? "" : "]") + "[" + namePrefix + "]";
} }
public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) { public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) {

View File

@ -30,7 +30,7 @@ import java.util.stream.Stream;
/** /**
* An extension to thread pool executor, allowing (in the future) to add specific additional stats to it. * An extension to thread pool executor, allowing (in the future) to add specific additional stats to it.
*/ */
public class EsThreadPoolExecutor extends ThreadPoolExecutor { public class OpenSearchThreadPoolExecutor extends ThreadPoolExecutor {
private final ThreadContext contextHolder; private final ThreadContext contextHolder;
private volatile ShutdownListener listener; private volatile ShutdownListener listener;
@ -45,15 +45,15 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
return name; return name;
} }
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, OpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) { BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {
this(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new OpenSearchAbortPolicy(), contextHolder); this(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new OpenSearchAbortPolicy(), contextHolder);
} }
@SuppressForbidden(reason = "properly rethrowing errors, see OpenSearchExecutors.rethrowErrors") @SuppressForbidden(reason = "properly rethrowing errors, see OpenSearchExecutors.rethrowErrors")
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, OpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) { ThreadContext contextHolder) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler); super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
this.name = name; this.name = name;
this.contextHolder = contextHolder; this.contextHolder = contextHolder;

View File

@ -41,15 +41,15 @@ import java.util.concurrent.atomic.AtomicLong;
* <p> * <p>
* Note, if two tasks have the same priority, the first to arrive will be executed first (FIFO style). * Note, if two tasks have the same priority, the first to arrive will be executed first (FIFO style).
*/ */
public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { public class PrioritizedOpenSearchThreadPoolExecutor extends OpenSearchThreadPoolExecutor {
private static final TimeValue NO_WAIT_TIME_VALUE = TimeValue.timeValueMillis(0); private static final TimeValue NO_WAIT_TIME_VALUE = TimeValue.timeValueMillis(0);
private final AtomicLong insertionOrder = new AtomicLong(); private final AtomicLong insertionOrder = new AtomicLong();
private final Queue<Runnable> current = ConcurrentCollections.newQueue(); private final Queue<Runnable> current = ConcurrentCollections.newQueue();
private final ScheduledExecutorService timer; private final ScheduledExecutorService timer;
public PrioritizedEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, public PrioritizedOpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) { ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) {
super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<>(), threadFactory, contextHolder); super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<>(), threadFactory, contextHolder);
this.timer = timer; this.timer = timer;
} }

View File

@ -36,12 +36,12 @@ import java.util.function.Function;
* An extension to thread pool executor, which automatically adjusts the queue size of the * An extension to thread pool executor, which automatically adjusts the queue size of the
* {@code ResizableBlockingQueue} according to Little's Law. * {@code ResizableBlockingQueue} according to Little's Law.
*/ */
public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecutor { public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchThreadPoolExecutor {
// This is a random starting point alpha. TODO: revisit this with actual testing and/or make it configurable // This is a random starting point alpha. TODO: revisit this with actual testing and/or make it configurable
public static double EWMA_ALPHA = 0.3; public static double EWMA_ALPHA = 0.3;
private static final Logger logger = LogManager.getLogger(QueueResizingEsThreadPoolExecutor.class); private static final Logger logger = LogManager.getLogger(QueueResizingOpenSearchThreadPoolExecutor.class);
// The amount the queue size is adjusted by for each calcuation // The amount the queue size is adjusted by for each calcuation
private static final int QUEUE_ADJUSTMENT_AMOUNT = 50; private static final int QUEUE_ADJUSTMENT_AMOUNT = 50;
@ -58,11 +58,11 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
private long startNs; private long startNs;
QueueResizingEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, QueueResizingOpenSearchThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ResizableBlockingQueue<Runnable> workQueue, int minQueueSize, int maxQueueSize, ResizableBlockingQueue<Runnable> workQueue, int minQueueSize, int maxQueueSize,
Function<Runnable, WrappedRunnable> runnableWrapper, final int tasksPerFrame, Function<Runnable, WrappedRunnable> runnableWrapper, final int tasksPerFrame,
TimeValue targetedResponseTime, ThreadFactory threadFactory, XRejectedExecutionHandler handler, TimeValue targetedResponseTime, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) { ThreadContext contextHolder) {
super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit,
workQueue, threadFactory, handler, contextHolder); workQueue, threadFactory, handler, contextHolder);
this.runnableWrapper = runnableWrapper; this.runnableWrapper = runnableWrapper;

View File

@ -62,7 +62,7 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING
* have out of the box support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread * have out of the box support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread
* that it is forking from.". Network calls will also preserve the senders headers automatically. * that it is forking from.". Network calls will also preserve the senders headers automatically.
* <p> * <p>
* Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every elasticsearch thread is managed by * Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every opensearch thread is managed by
* a thread pool or executor being responsible for stashing and restoring the threads context. For instance if a network request is * a thread pool or executor being responsible for stashing and restoring the threads context. For instance if a network request is
* received, all headers are deserialized from the network and directly added as the headers of the threads {@link ThreadContext} * received, all headers are deserialized from the network and directly added as the headers of the threads {@link ThreadContext}
* (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently active on this thread the network code * (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently active on this thread the network code

View File

@ -72,7 +72,7 @@ public class XContentOpenSearchExtension implements XContentBuilderExtension {
public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() { public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() {
Map<Class<?>, XContentBuilder.Writer> writers = new HashMap<>(); Map<Class<?>, XContentBuilder.Writer> writers = new HashMap<>();
// Fully-qualified here to reduce ambiguity around our (ES') Version class // Fully-qualified here to reduce ambiguity around our (OpenSearch') Version class
writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v))); writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));

View File

@ -43,7 +43,7 @@ import org.opensearch.common.util.CollectionUtils;
import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentCollections;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.KeyedLock; import org.opensearch.common.util.concurrent.KeyedLock;
import org.opensearch.core.internal.io.IOUtils; import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsProvider;
@ -108,7 +108,7 @@ public class UnicastZenPing implements ZenPing {
private final SeedHostsProvider hostsProvider; private final SeedHostsProvider hostsProvider;
protected final EsThreadPoolExecutor unicastZenPingExecutorService; protected final OpenSearchThreadPoolExecutor unicastZenPingExecutorService;
private final TimeValue resolveTimeout; private final TimeValue resolveTimeout;

View File

@ -46,7 +46,7 @@ import org.opensearch.common.collect.Tuple;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.core.internal.io.IOUtils; import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.discovery.DiscoveryModule; import org.opensearch.discovery.DiscoveryModule;
import org.opensearch.env.NodeMetadata; import org.opensearch.env.NodeMetadata;
@ -334,7 +334,7 @@ public class GatewayMetaState implements Closeable {
static final String THREAD_NAME = "AsyncLucenePersistedState#updateTask"; static final String THREAD_NAME = "AsyncLucenePersistedState#updateTask";
private final EsThreadPoolExecutor threadPoolExecutor; private final OpenSearchThreadPoolExecutor threadPoolExecutor;
private final PersistedState persistedState; private final PersistedState persistedState;
boolean newCurrentTermQueued = false; boolean newCurrentTermQueued = false;

View File

@ -125,7 +125,7 @@ public class TransportNodesListGatewayStartedShards extends
if (request.getCustomDataPath() != null) { if (request.getCustomDataPath() != null) {
customDataPath = request.getCustomDataPath(); customDataPath = request.getCustomDataPath();
} else { } else {
// TODO: Fallback for BWC with older ES versions. Remove once request.getCustomDataPath() always returns non-null // TODO: Fallback for BWC with older OpenSearch versions. Remove once request.getCustomDataPath() always returns non-null
final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex());
if (metadata != null) { if (metadata != null) {
customDataPath = new IndexSettings(metadata, settings).customDataPath(); customDataPath = new IndexSettings(metadata, settings).customDataPath();

View File

@ -29,7 +29,7 @@ import org.opensearch.common.unit.ByteSizeUnit;
import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.ByteSizeValue;
/** /**
* A shard in elasticsearch is a Lucene index, and a Lucene index is broken * A shard in opensearch is a Lucene index, and a Lucene index is broken
* down into segments. Segments are internal storage elements in the index * down into segments. Segments are internal storage elements in the index
* where the index data is stored, and are immutable up to delete markers. * where the index data is stored, and are immutable up to delete markers.
* Segments are, periodically, merged into larger segments to keep the * Segments are, periodically, merged into larger segments to keep the

View File

@ -48,7 +48,7 @@ import java.util.function.Supplier;
* doing range searches. Therefore the {@code _seq_no} field is stored both * doing range searches. Therefore the {@code _seq_no} field is stored both
* as a numeric doc value and as numeric indexed field. * as a numeric doc value and as numeric indexed field.
* *
* This mapper also manages the primary term field, which has no ES named * This mapper also manages the primary term field, which has no OpenSearch named
* equivalent. The primary term is only used during collision after receiving * equivalent. The primary term is only used during collision after receiving
* identical seq# values for two document copies. The primary term is stored as * identical seq# values for two document copies. The primary term is stored as
* a doc value field without being indexed, since it is only intended for use * a doc value field without being indexed, since it is only intended for use

View File

@ -46,7 +46,7 @@ import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.mapper.ObjectMapper;
import org.opensearch.index.search.ESToParentBlockJoinQuery; import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
import org.opensearch.index.search.NestedHelper; import org.opensearch.index.search.NestedHelper;
import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHit;
import org.opensearch.search.fetch.subphase.InnerHitsContext; import org.opensearch.search.fetch.subphase.InnerHitsContext;
@ -240,7 +240,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
public static String scoreModeAsString(ScoreMode scoreMode) { public static String scoreModeAsString(ScoreMode scoreMode) {
if (scoreMode == ScoreMode.Total) { if (scoreMode == ScoreMode.Total) {
// Lucene uses 'total' but 'sum' is more consistent with other elasticsearch APIs // Lucene uses 'total' but 'sum' is more consistent with other opensearch APIs
return "sum"; return "sum";
} else { } else {
return scoreMode.name().toLowerCase(Locale.ROOT); return scoreMode.name().toLowerCase(Locale.ROOT);
@ -306,7 +306,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
innerQuery = Queries.filtered(innerQuery, nestedObjectMapper.nestedTypeFilter()); innerQuery = Queries.filtered(innerQuery, nestedObjectMapper.nestedTypeFilter());
} }
return new ESToParentBlockJoinQuery(innerQuery, parentFilter, scoreMode, return new OpenSearchToParentBlockJoinQuery(innerQuery, parentFilter, scoreMode,
objectMapper == null ? null : objectMapper.fullPath()); objectMapper == null ? null : objectMapper.fullPath());
} }

View File

@ -139,7 +139,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
/** Whether or not the lenient flag has been set or not */ /** Whether or not the lenient flag has been set or not */
private boolean lenientSet = false; private boolean lenientSet = false;
/** Further search settings needed by the ES specific query string parser only. */ /** Further search settings needed by the OpenSearch specific query string parser only. */
private Settings settings = new Settings(); private Settings settings = new Settings();
/** Construct a new simple query with this query string. */ /** Construct a new simple query with this query string. */

View File

@ -233,7 +233,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
builder.setSlop(slop); builder.setSlop(slop);
/* /*
* Lucene SpanNearQuery throws exceptions for certain use cases like adding gap to a * Lucene SpanNearQuery throws exceptions for certain use cases like adding gap to a
* unordered SpanNearQuery. Should ES have the same checks or wrap those thrown exceptions? * unordered SpanNearQuery. Should OpenSearch have the same checks or wrap those thrown exceptions?
*/ */
if (isGap) { if (isGap) {
int gap = ((SpanGapQueryBuilder) queryBuilder).width(); int gap = ((SpanGapQueryBuilder) queryBuilder).width();

View File

@ -86,8 +86,8 @@ public final class NestedHelper {
.map(BooleanClause::getQuery) .map(BooleanClause::getQuery)
.anyMatch(this::mightMatchNestedDocs); .anyMatch(this::mightMatchNestedDocs);
} }
} else if (query instanceof ESToParentBlockJoinQuery) { } else if (query instanceof OpenSearchToParentBlockJoinQuery) {
return ((ESToParentBlockJoinQuery) query).getPath() != null; return ((OpenSearchToParentBlockJoinQuery) query).getPath() != null;
} else { } else {
return true; return true;
} }

View File

@ -649,7 +649,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
if (in.getVersion().onOrAfter(Version.V_6_3_0)) { if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
this.tracked = in.readBoolean(); this.tracked = in.readBoolean();
} else { } else {
// Every in-sync shard copy is also tracked (see invariant). This was the case even in earlier ES versions. // Every in-sync shard copy is also tracked (see invariant). This was the case even in earlier OpenSearch versions.
// Non in-sync shard copies might be tracked or not. As this information here is only serialized during relocation hand-off, // Non in-sync shard copies might be tracked or not. As this information here is only serialized during relocation hand-off,
// after which replica recoveries cannot complete anymore (i.e. they cannot move from in-sync == false to in-sync == true), // after which replica recoveries cannot complete anymore (i.e. they cannot move from in-sync == false to in-sync == true),
// we can treat non in-sync replica shard copies as untracked. They will go through a fresh recovery against the new // we can treat non in-sync replica shard copies as untracked. They will go through a fresh recovery against the new

View File

@ -148,7 +148,7 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<T
if (request.getCustomDataPath() != null) { if (request.getCustomDataPath() != null) {
customDataPath = request.getCustomDataPath(); customDataPath = request.getCustomDataPath();
} else { } else {
// TODO: Fallback for BWC with older ES versions. Remove this once request.getCustomDataPath() always returns non-null // TODO: Fallback for BWC with older predecessor (ES) versions. Remove this once request.getCustomDataPath() always returns non-null
if (indexService != null) { if (indexService != null) {
customDataPath = indexService.getIndexSettings().customDataPath(); customDataPath = indexService.getIndexSettings().customDataPath();
} else { } else {

View File

@ -510,7 +510,7 @@ public final class RepositoryData {
builder.field(MIN_VERSION, SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION.toString()); builder.field(MIN_VERSION, SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION.toString());
builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers); builder.field(INDEX_METADATA_IDENTIFIERS, indexMetaDataGenerations.identifiers);
} else if (shouldWriteShardGens) { } else if (shouldWriteShardGens) {
// Add min version field to make it impossible for older ES versions to deserialize this object // Add min version field to make it impossible for older OpenSearch versions to deserialize this object
builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString()); builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString());
} }
builder.endObject(); builder.endObject();

View File

@ -59,7 +59,7 @@
* | | in the repository * | | in the repository
* | |- meta-20131010.dat - JSON Serialized {@link org.opensearch.cluster.metadata.IndexMetadata } for index "foo" * | |- meta-20131010.dat - JSON Serialized {@link org.opensearch.cluster.metadata.IndexMetadata } for index "foo"
* | |- 0/ - data for shard "0" of index "foo" * | |- 0/ - data for shard "0" of index "foo"
* | | |- __1 \ (files with numeric names were created by older ES versions) * | | |- __1 \ (files with numeric names were created by older preceding ES versions)
* | | |- __2 | * | | |- __2 |
* | | |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files * | | |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
* | | |- __1gbJy18wS_2kv1qI7FgKuQ | * | | |- __1gbJy18wS_2kv1qI7FgKuQ |
@ -70,7 +70,7 @@
* | | |- snap-20131011.dat - SMILE serialized {@link org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for * | | |- snap-20131011.dat - SMILE serialized {@link org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
* | | | snapshot "20131011" * | | | snapshot "20131011"
* | | |- index-123 - SMILE serialized {@link org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} for * | | |- index-123 - SMILE serialized {@link org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} for
* | | | the shard (files with numeric suffixes were created by older versions, newer ES versions use a uuid * | | | the shard (files with numeric suffixes were created by older versions, newer OpenSearch versions use a uuid
* | | | suffix instead) * | | | suffix instead)
* | | * | |
* | |- 1/ - data for shard "1" of index "foo" * | |- 1/ - data for shard "1" of index "foo"

View File

@ -95,8 +95,8 @@ public class RestAllocationAction extends AbstractCatAction {
final Table table = new Table(); final Table table = new Table();
table.startHeaders(); table.startHeaders();
table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node"); table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node");
table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by ES indices"); table.addCell("disk.indices", "alias:di,diskIndices;text-align:right;desc:disk used by OpenSearch indices");
table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)"); table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just OpenSearch)");
table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available"); table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available");
table.addCell("disk.total", "alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes"); table.addCell("disk.total", "alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes");
table.addCell("disk.percent", "alias:dp,diskPercent;text-align:right;desc:percent disk used"); table.addCell("disk.percent", "alias:dp,diskPercent;text-align:right;desc:percent disk used");

View File

@ -61,7 +61,7 @@ public class ReverseNestedAggregator extends BucketsAggregator implements Single
@Override @Override
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
// In ES if parent is deleted, then also the children are deleted, so the child docs this agg receives // In OpenSearch if parent is deleted, then also the children are deleted, so the child docs this agg receives
// must belong to parent docs that is alive. For this reason acceptedDocs can be null here. // must belong to parent docs that is alive. For this reason acceptedDocs can be null here.
final BitSet parentDocs = parentBitsetProducer.getBitSet(ctx); final BitSet parentDocs = parentBitsetProducer.getBitSet(ctx);
if (parentDocs == null) { if (parentDocs == null) {

View File

@ -95,7 +95,7 @@ public class DiversifiedBytesHashSamplerAggregator extends SamplerAggregator {
} }
// This class extends the DiversifiedTopDocsCollector and provides // This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource // a lookup from opensearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector { class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {
private SortedBinaryDocValues values; private SortedBinaryDocValues values;

View File

@ -101,7 +101,7 @@ public class DiversifiedMapSamplerAggregator extends SamplerAggregator {
} }
// This class extends the DiversifiedTopDocsCollector and provides // This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource // a lookup from opensearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector { class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {
private SortedBinaryDocValues values; private SortedBinaryDocValues values;

View File

@ -88,7 +88,7 @@ public class DiversifiedNumericSamplerAggregator extends SamplerAggregator {
} }
// This class extends the DiversifiedTopDocsCollector and provides // This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource // a lookup from opensearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector { class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {
private SortedNumericDocValues values; private SortedNumericDocValues values;

View File

@ -90,7 +90,7 @@ public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator {
} }
// This class extends the DiversifiedTopDocsCollector and provides // This class extends the DiversifiedTopDocsCollector and provides
// a lookup from elasticsearch's ValuesSource // a lookup from opensearch's ValuesSource
class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector { class ValuesDiversifiedTopDocsCollector extends DiversifiedTopDocsCollector {

View File

@ -183,7 +183,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
} }
TopFieldDocs mergedTopDocs = (TopFieldDocs) manager.reduce(collectors); TopFieldDocs mergedTopDocs = (TopFieldDocs) manager.reduce(collectors);
// Lucene sets shards indexes during merging of topDocs from different collectors // Lucene sets shards indexes during merging of topDocs from different collectors
// We need to reset shard index; ES will set shard index later during reduce stage // We need to reset shard index; OpenSearch will set shard index later during reduce stage
for (ScoreDoc scoreDoc : mergedTopDocs.scoreDocs) { for (ScoreDoc scoreDoc : mergedTopDocs.scoreDocs) {
scoreDoc.shardIndex = -1; scoreDoc.shardIndex = -1;
} }

View File

@ -51,7 +51,7 @@ import org.opensearch.common.Booleans;
import org.opensearch.common.CheckedConsumer; import org.opensearch.common.CheckedConsumer;
import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.Lucene;
import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
import org.opensearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor;
import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexSortConfig;
import org.opensearch.index.mapper.DateFieldMapper.DateFieldType; import org.opensearch.index.mapper.DateFieldMapper.DateFieldType;
import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MappedFieldType;
@ -303,8 +303,8 @@ public class QueryPhase {
} }
ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH); ExecutorService executor = searchContext.indexShard().getThreadPool().executor(ThreadPool.Names.SEARCH);
if (executor instanceof QueueResizingEsThreadPoolExecutor) { if (executor instanceof QueueResizingOpenSearchThreadPoolExecutor) {
QueueResizingEsThreadPoolExecutor rExecutor = (QueueResizingEsThreadPoolExecutor) executor; QueueResizingOpenSearchThreadPoolExecutor rExecutor = (QueueResizingOpenSearchThreadPoolExecutor) executor;
queryResult.nodeQueueSize(rExecutor.getCurrentQueueSize()); queryResult.nodeQueueSize(rExecutor.getCurrentQueueSize());
queryResult.serviceTimeEWMA((long) rExecutor.getTaskExecutionEWMA()); queryResult.serviceTimeEWMA((long) rExecutor.getTaskExecutionEWMA());
} }

View File

@ -32,8 +32,8 @@ import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.SizeValue; import org.opensearch.common.unit.SizeValue;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.util.concurrent.XRejectedExecutionHandler; import org.opensearch.common.util.concurrent.XRejectedExecutionHandler;
import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.ToXContentFragment;
@ -606,7 +606,7 @@ public class ThreadPool implements ReportingService<ThreadPoolInfo>, Scheduler {
public final Info info; public final Info info;
ExecutorHolder(ExecutorService executor, Info info) { ExecutorHolder(ExecutorService executor, Info info) {
assert executor instanceof EsThreadPoolExecutor || executor == DIRECT_EXECUTOR; assert executor instanceof OpenSearchThreadPoolExecutor || executor == DIRECT_EXECUTOR;
this.executor = executor; this.executor = executor;
this.info = info; this.info = info;
} }

View File

@ -39,7 +39,7 @@ import org.opensearch.cluster.service.FakeThreadPoolMasterService;
import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.service.MasterService;
import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.node.Node; import org.opensearch.node.Node;
import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.ClusterServiceUtils;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
@ -72,7 +72,7 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas
final ClusterApplierService clusterApplierService = new ClusterApplierService("test", settings, clusterSettings, threadPool) { final ClusterApplierService clusterApplierService = new ClusterApplierService("test", settings, clusterSettings, threadPool) {
@Override @Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor("mock-executor", deterministicTaskQueue, threadPool); return new MockSinglePrioritizingExecutor("mock-executor", deterministicTaskQueue, threadPool);
} }
}; };

View File

@ -35,12 +35,12 @@ import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.collect.Map; import org.opensearch.common.collect.Map;
import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.elasticsearch.index.Index; import org.opensearch.index.Index;
import org.elasticsearch.index.mapper.ContentPath; import org.opensearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.Mapper; import org.opensearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.Mapping; import org.opensearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.RootObjectMapper; import org.opensearch.index.mapper.RootObjectMapper;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
import java.util.List; import java.util.List;

View File

@ -27,7 +27,7 @@ import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.elasticsearch.monitor.StatusInfo; import org.opensearch.monitor.StatusInfo;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.test.transport.MockTransport; import org.opensearch.test.transport.MockTransport;
import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectTransportException;
@ -58,8 +58,8 @@ import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.emptySet; import static java.util.Collections.emptySet;
import static org.opensearch.cluster.coordination.PreVoteCollector.REQUEST_PRE_VOTE_ACTION_NAME; import static org.opensearch.cluster.coordination.PreVoteCollector.REQUEST_PRE_VOTE_ACTION_NAME;
import static org.elasticsearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY;
import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY;
import static org.opensearch.node.Node.NODE_NAME_SETTING; import static org.opensearch.node.Node.NODE_NAME_SETTING;
import static org.opensearch.threadpool.ThreadPool.Names.SAME; import static org.opensearch.threadpool.ThreadPool.Names.SAME;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;

View File

@ -26,7 +26,7 @@ import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException;
import org.opensearch.common.Priority; import org.opensearch.common.Priority;
import org.opensearch.common.collect.Tuple; import org.opensearch.common.collect.Tuple;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.junit.Before; import org.junit.Before;
import org.opensearch.cluster.service.TaskBatcher; import org.opensearch.cluster.service.TaskBatcher;
@ -61,7 +61,7 @@ public class TaskBatcherTests extends TaskExecutorTests {
class TestTaskBatcher extends TaskBatcher { class TestTaskBatcher extends TaskBatcher {
TestTaskBatcher(Logger logger, PrioritizedEsThreadPoolExecutor threadExecutor) { TestTaskBatcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor) {
super(logger, threadExecutor); super(logger, threadExecutor);
} }

View File

@ -26,7 +26,7 @@ import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.cluster.service.SourcePrioritizedRunnable; import org.opensearch.cluster.service.SourcePrioritizedRunnable;
import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.TestThreadPool;
@ -50,7 +50,7 @@ import static org.hamcrest.core.Is.is;
public class TaskExecutorTests extends OpenSearchTestCase { public class TaskExecutorTests extends OpenSearchTestCase {
protected static ThreadPool threadPool; protected static ThreadPool threadPool;
protected PrioritizedEsThreadPoolExecutor threadExecutor; protected PrioritizedOpenSearchThreadPoolExecutor threadExecutor;
@BeforeClass @BeforeClass
public static void createThreadPool() { public static void createThreadPool() {

View File

@ -24,7 +24,7 @@ import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.TestThreadPool;
@ -77,10 +77,10 @@ public class IndexShardOperationPermitsTests extends OpenSearchTestCase {
.put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize) .put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize)
.put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize) .put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize)
.build()); .build());
assertThat(threadPool.executor(ThreadPool.Names.WRITE), instanceOf(EsThreadPoolExecutor.class)); assertThat(threadPool.executor(ThreadPool.Names.WRITE), instanceOf(OpenSearchThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getCorePoolSize(), equalTo(writeThreadPoolSize)); assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getCorePoolSize(), equalTo(writeThreadPoolSize));
assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getMaximumPoolSize(), equalTo(writeThreadPoolSize)); assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getMaximumPoolSize(), equalTo(writeThreadPoolSize));
assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getQueue().remainingCapacity(), assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getQueue().remainingCapacity(),
equalTo(writeThreadPoolQueueSize)); equalTo(writeThreadPoolQueueSize));
} }

View File

@ -28,7 +28,7 @@ import org.opensearch.common.breaker.NoopCircuitBreaker;
import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardId;
import org.opensearch.search.DocValueFormat; import org.opensearch.search.DocValueFormat;
import org.opensearch.search.SearchShardTarget; import org.opensearch.search.SearchShardTarget;
@ -54,7 +54,7 @@ public class QueryPhaseResultConsumerTests extends OpenSearchTestCase {
private SearchPhaseController searchPhaseController; private SearchPhaseController searchPhaseController;
private ThreadPool threadPool; private ThreadPool threadPool;
private EsThreadPoolExecutor executor; private OpenSearchThreadPoolExecutor executor;
@Before @Before
public void setup() { public void setup() {

View File

@ -44,7 +44,7 @@ import org.opensearch.common.text.Text;
import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.AtomicArray;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardId;
import org.opensearch.search.DocValueFormat; import org.opensearch.search.DocValueFormat;
import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHit;
@ -102,7 +102,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class SearchPhaseControllerTests extends OpenSearchTestCase { public class SearchPhaseControllerTests extends OpenSearchTestCase {
private ThreadPool threadPool; private ThreadPool threadPool;
private EsThreadPoolExecutor fixedExecutor; private OpenSearchThreadPoolExecutor fixedExecutor;
private SearchPhaseController searchPhaseController; private SearchPhaseController searchPhaseController;
private List<Boolean> reductions; private List<Boolean> reductions;

View File

@ -51,7 +51,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
} }
public void testFixedForcedExecution() throws Exception { public void testFixedForcedExecution() throws Exception {
EsThreadPoolExecutor executor = OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), 1, 1, OpenSearchExecutors.daemonThreadFactory("test"), threadContext); OpenSearchExecutors.newFixed(getName(), 1, 1, OpenSearchExecutors.daemonThreadFactory("test"), threadContext);
final CountDownLatch wait = new CountDownLatch(1); final CountDownLatch wait = new CountDownLatch(1);
@ -114,7 +114,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
} }
public void testFixedRejected() throws Exception { public void testFixedRejected() throws Exception {
EsThreadPoolExecutor executor = OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), 1, 1, OpenSearchExecutors.daemonThreadFactory("test"), threadContext); OpenSearchExecutors.newFixed(getName(), 1, 1, OpenSearchExecutors.daemonThreadFactory("test"), threadContext);
final CountDownLatch wait = new CountDownLatch(1); final CountDownLatch wait = new CountDownLatch(1);
@ -247,7 +247,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
int queue = between(0, 100); int queue = between(0, 100);
int actions = queue + pool; int actions = queue + pool;
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
EsThreadPoolExecutor executor = OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext); OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext);
try { try {
for (int i = 0; i < actions; i++) { for (int i = 0; i < actions; i++) {
@ -279,7 +279,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
assertFalse("Thread pool registering as terminated when it isn't", e.isExecutorShutdown()); assertFalse("Thread pool registering as terminated when it isn't", e.isExecutorShutdown());
String message = e.getMessage(); String message = e.getMessage();
assertThat(message, containsString("of dummy runnable")); assertThat(message, containsString("of dummy runnable"));
assertThat(message, containsString("on EsThreadPoolExecutor[name = " + getName())); assertThat(message, containsString("on OpenSearchThreadPoolExecutor[name = " + getName()));
assertThat(message, containsString("queue capacity = " + queue)); assertThat(message, containsString("queue capacity = " + queue));
assertThat(message, containsString("[Running")); assertThat(message, containsString("[Running"));
/* /*
@ -319,7 +319,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
assertTrue("Thread pool not registering as terminated when it is", e.isExecutorShutdown()); assertTrue("Thread pool not registering as terminated when it is", e.isExecutorShutdown());
String message = e.getMessage(); String message = e.getMessage();
assertThat(message, containsString("of dummy runnable")); assertThat(message, containsString("of dummy runnable"));
assertThat(message, containsString("on EsThreadPoolExecutor[name = " + getName())); assertThat(message, containsString("on OpenSearchThreadPoolExecutor[name = " + getName()));
assertThat(message, containsString("queue capacity = " + queue)); assertThat(message, containsString("queue capacity = " + queue));
assertThat(message, containsString("[Terminated")); assertThat(message, containsString("[Terminated"));
assertThat(message, containsString("active threads = 0")); assertThat(message, containsString("active threads = 0"));
@ -337,7 +337,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
threadContext.putHeader("foo", "bar"); threadContext.putHeader("foo", "bar");
final Integer one = Integer.valueOf(1); final Integer one = Integer.valueOf(1);
threadContext.putTransient("foo", one); threadContext.putTransient("foo", one);
EsThreadPoolExecutor executor = OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext); OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext);
try { try {
executor.execute(() -> { executor.execute(() -> {
@ -368,7 +368,7 @@ public class OpenSearchExecutorsTests extends OpenSearchTestCase {
int queue = between(0, 100); int queue = between(0, 100);
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch executed = new CountDownLatch(1); final CountDownLatch executed = new CountDownLatch(1);
EsThreadPoolExecutor executor = OpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext); OpenSearchExecutors.newFixed(getName(), pool, queue, OpenSearchExecutors.daemonThreadFactory("dummy"), threadContext);
try { try {
Runnable r = () -> { Runnable r = () -> {

View File

@ -190,7 +190,7 @@ public class PrioritizedExecutorsTests extends OpenSearchTestCase {
public void testTimeout() throws Exception { public void testTimeout() throws Exception {
ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor(OpenSearchExecutors.daemonThreadFactory(getTestName())); ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor(OpenSearchExecutors.daemonThreadFactory(getTestName()));
PrioritizedEsThreadPoolExecutor executor = PrioritizedOpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newSinglePrioritizing(getName(), OpenSearchExecutors.daemonThreadFactory(getTestName()), holder, timer); OpenSearchExecutors.newSinglePrioritizing(getName(), OpenSearchExecutors.daemonThreadFactory(getTestName()), holder, timer);
final CountDownLatch invoked = new CountDownLatch(1); final CountDownLatch invoked = new CountDownLatch(1);
final CountDownLatch block = new CountDownLatch(1); final CountDownLatch block = new CountDownLatch(1);
@ -211,7 +211,7 @@ public class PrioritizedExecutorsTests extends OpenSearchTestCase {
} }
}); });
invoked.await(); invoked.await();
PrioritizedEsThreadPoolExecutor.Pending[] pending = executor.getPending(); PrioritizedOpenSearchThreadPoolExecutor.Pending[] pending = executor.getPending();
assertThat(pending.length, equalTo(1)); assertThat(pending.length, equalTo(1));
assertThat(pending[0].task.toString(), equalTo("the blocking")); assertThat(pending[0].task.toString(), equalTo("the blocking"));
assertThat(pending[0].executing, equalTo(true)); assertThat(pending[0].executing, equalTo(true));
@ -254,7 +254,7 @@ public class PrioritizedExecutorsTests extends OpenSearchTestCase {
ThreadPool threadPool = new TestThreadPool("test"); ThreadPool threadPool = new TestThreadPool("test");
final ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler(); final ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler();
final AtomicBoolean timeoutCalled = new AtomicBoolean(); final AtomicBoolean timeoutCalled = new AtomicBoolean();
PrioritizedEsThreadPoolExecutor executor = PrioritizedOpenSearchThreadPoolExecutor executor =
OpenSearchExecutors.newSinglePrioritizing(getName(), OpenSearchExecutors.daemonThreadFactory(getTestName()), holder, timer); OpenSearchExecutors.newSinglePrioritizing(getName(), OpenSearchExecutors.daemonThreadFactory(getTestName()), holder, timer);
final CountDownLatch invoked = new CountDownLatch(1); final CountDownLatch invoked = new CountDownLatch(1);
executor.execute(new Runnable() { executor.execute(new Runnable() {

View File

@ -44,8 +44,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int threads = randomIntBetween(1, 3); int threads = randomIntBetween(1, 3);
int measureWindow = 3; int measureWindow = 3;
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow); logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000, "test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, 1000, fastWrapper(), TimeUnit.MILLISECONDS, queue, 10, 1000, fastWrapper(),
measureWindow, TimeValue.timeValueMillis(1), OpenSearchExecutors.daemonThreadFactory("queuetest"), measureWindow, TimeValue.timeValueMillis(1), OpenSearchExecutors.daemonThreadFactory("queuetest"),
@ -75,8 +75,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int threads = randomIntBetween(1, 10); int threads = randomIntBetween(1, 10);
int measureWindow = randomIntBetween(100, 200); int measureWindow = randomIntBetween(100, 200);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow); logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000, "test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, 3000, fastWrapper(), TimeUnit.MILLISECONDS, queue, 10, 3000, fastWrapper(),
measureWindow, TimeValue.timeValueMillis(1), OpenSearchExecutors.daemonThreadFactory("queuetest"), measureWindow, TimeValue.timeValueMillis(1), OpenSearchExecutors.daemonThreadFactory("queuetest"),
@ -103,8 +103,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int threads = randomIntBetween(1, 10); int threads = randomIntBetween(1, 10);
int measureWindow = randomIntBetween(100, 200); int measureWindow = randomIntBetween(100, 200);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow); logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000, "test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, 3000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1), TimeUnit.MILLISECONDS, queue, 10, 3000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context); OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -131,8 +131,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int measureWindow = randomIntBetween(10, 100); int measureWindow = randomIntBetween(10, 100);
int min = randomIntBetween(4981, 4999); int min = randomIntBetween(4981, 4999);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow); logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000, "test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, min, 100000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1), TimeUnit.MILLISECONDS, queue, min, 100000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context); OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -160,8 +160,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
int measureWindow = randomIntBetween(10, 100); int measureWindow = randomIntBetween(10, 100);
int max = randomIntBetween(5010, 5024); int max = randomIntBetween(5010, 5024);
logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow); logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", threads, threads, 1000, "test-threadpool", threads, threads, 1000,
TimeUnit.MILLISECONDS, queue, 10, max, fastWrapper(), measureWindow, TimeValue.timeValueMillis(1), TimeUnit.MILLISECONDS, queue, 10, max, fastWrapper(), measureWindow, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context); OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -185,8 +185,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
100); 100);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", 1, 1, 1000, "test-threadpool", 1, 1, 1000,
TimeUnit.MILLISECONDS, queue, 10, 200, fastWrapper(), 10, TimeValue.timeValueMillis(1), TimeUnit.MILLISECONDS, queue, 10, 200, fastWrapper(), 10, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context); OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -226,8 +226,8 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
100); 100);
QueueResizingEsThreadPoolExecutor executor = QueueResizingOpenSearchThreadPoolExecutor executor =
new QueueResizingEsThreadPoolExecutor( new QueueResizingOpenSearchThreadPoolExecutor(
"test-threadpool", 1, 1, 1000, "test-threadpool", 1, 1, 1000,
TimeUnit.MILLISECONDS, queue, 10, 200, exceptionalWrapper(), 10, TimeValue.timeValueMillis(1), TimeUnit.MILLISECONDS, queue, 10, 200, exceptionalWrapper(), 10, TimeValue.timeValueMillis(1),
OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context); OpenSearchExecutors.daemonThreadFactory("queuetest"), new OpenSearchAbortPolicy(), context);
@ -258,7 +258,7 @@ public class QueueResizingOpenSearchThreadPoolExecutorTests extends OpenSearchTe
} }
/** Execute a blank task {@code times} times for the executor */ /** Execute a blank task {@code times} times for the executor */
private void executeTask(QueueResizingEsThreadPoolExecutor executor, int times) { private void executeTask(QueueResizingOpenSearchThreadPoolExecutor executor, int times) {
logger.info("--> executing a task [{}] times", times); logger.info("--> executing a task [{}] times", times);
for (int i = 0; i < times; i++) { for (int i = 0; i < times; i++) {
executor.execute(() -> {}); executor.execute(() -> {});

View File

@ -144,7 +144,7 @@ import org.opensearch.common.transport.TransportAddress;
import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.PageCacheRecycler;
import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.NamedXContentRegistry;
import org.opensearch.env.Environment; import org.opensearch.env.Environment;
@ -1414,7 +1414,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
clusterService = new ClusterService(settings, clusterSettings, masterService, clusterService = new ClusterService(settings, clusterSettings, masterService,
new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) {
@Override @Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue, threadPool); return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue, threadPool);
} }

View File

@ -21,7 +21,7 @@ package org.opensearch.threadpool;
import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
@ -79,23 +79,23 @@ public class ScalingThreadPoolTests extends OpenSearchThreadPoolTestCase {
runScalingThreadPoolTest(builder.build(), (clusterSettings, threadPool) -> { runScalingThreadPoolTest(builder.build(), (clusterSettings, threadPool) -> {
final Executor executor = threadPool.executor(threadPoolName); final Executor executor = threadPool.executor(threadPoolName);
assertThat(executor, instanceOf(EsThreadPoolExecutor.class)); assertThat(executor, instanceOf(OpenSearchThreadPoolExecutor.class));
final EsThreadPoolExecutor esThreadPoolExecutor = (EsThreadPoolExecutor)executor; final OpenSearchThreadPoolExecutor openSearchThreadPoolExecutor = (OpenSearchThreadPoolExecutor)executor;
final ThreadPool.Info info = info(threadPool, threadPoolName); final ThreadPool.Info info = info(threadPool, threadPoolName);
assertThat(info.getName(), equalTo(threadPoolName)); assertThat(info.getName(), equalTo(threadPoolName));
assertThat(info.getThreadPoolType(), equalTo(ThreadPool.ThreadPoolType.SCALING)); assertThat(info.getThreadPoolType(), equalTo(ThreadPool.ThreadPoolType.SCALING));
assertThat(info.getKeepAlive().seconds(), equalTo(keepAlive)); assertThat(info.getKeepAlive().seconds(), equalTo(keepAlive));
assertThat(esThreadPoolExecutor.getKeepAliveTime(TimeUnit.SECONDS), equalTo(keepAlive)); assertThat(openSearchThreadPoolExecutor.getKeepAliveTime(TimeUnit.SECONDS), equalTo(keepAlive));
assertNull(info.getQueueSize()); assertNull(info.getQueueSize());
assertThat(esThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(Integer.MAX_VALUE)); assertThat(openSearchThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(Integer.MAX_VALUE));
assertThat(info.getMin(), equalTo(core)); assertThat(info.getMin(), equalTo(core));
assertThat(esThreadPoolExecutor.getCorePoolSize(), equalTo(core)); assertThat(openSearchThreadPoolExecutor.getCorePoolSize(), equalTo(core));
assertThat(info.getMax(), equalTo(expectedMax)); assertThat(info.getMax(), equalTo(expectedMax));
assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax)); assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax));
}); });
if (processorsUsed > availableProcessors) { if (processorsUsed > availableProcessors) {

View File

@ -22,7 +22,7 @@ package org.opensearch.threadpool;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.threadpool.ThreadPool.Names;
import java.lang.reflect.Field; import java.lang.reflect.Field;
@ -105,16 +105,16 @@ public class UpdateThreadPoolSettingsTests extends OpenSearchThreadPoolTestCase
.put("thread_pool." + threadPoolName + ".size", expectedSize) .put("thread_pool." + threadPoolName + ".size", expectedSize)
.build(); .build();
threadPool = new ThreadPool(nodeSettings); threadPool = new ThreadPool(nodeSettings);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); assertThat(threadPool.executor(threadPoolName), instanceOf(OpenSearchThreadPoolExecutor.class));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); assertThat(threadPool.executor(threadPoolName), instanceOf(OpenSearchThreadPoolExecutor.class));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize));
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize)); assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize));
assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize)); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize));
assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize));
// keep alive does not apply to fixed thread pools // keep alive does not apply to fixed thread pools
assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); assertThat(((OpenSearchThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L));
} finally { } finally {
terminateThreadPoolIfNeeded(threadPool); terminateThreadPoolIfNeeded(threadPool);
} }
@ -139,7 +139,7 @@ public class UpdateThreadPoolSettingsTests extends OpenSearchThreadPoolTestCase
final long expectedKeepAlive = "generic".equals(threadPoolName) ? 30 : 300; final long expectedKeepAlive = "generic".equals(threadPoolName) ? 30 : 300;
assertThat(info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive)); assertThat(info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive));
assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING);
assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); assertThat(threadPool.executor(threadPoolName), instanceOf(OpenSearchThreadPoolExecutor.class));
} finally { } finally {
terminateThreadPoolIfNeeded(threadPool); terminateThreadPoolIfNeeded(threadPool);
} }

View File

@ -59,7 +59,7 @@ import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockBigArrays;
import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.MockPageCacheRecycler;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.discovery.DiscoveryModule; import org.opensearch.discovery.DiscoveryModule;
import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsProvider;
import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeEnvironment;
@ -1366,7 +1366,7 @@ public class AbstractCoordinatorTestCase extends OpenSearchTestCase {
} }
@Override @Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new MockSinglePrioritizingExecutor(nodeName, deterministicTaskQueue, threadPool); return new MockSinglePrioritizingExecutor(nodeName, deterministicTaskQueue, threadPool);
} }

View File

@ -19,16 +19,16 @@
package org.opensearch.cluster.coordination; package org.opensearch.cluster.coordination;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
/** /**
* Mock single threaded {@link PrioritizedEsThreadPoolExecutor} based on {@link DeterministicTaskQueue}, * Mock single threaded {@link PrioritizedOpenSearchThreadPoolExecutor} based on {@link DeterministicTaskQueue},
* simulating the behaviour of an executor returned by {@link OpenSearchExecutors#newSinglePrioritizing}. * simulating the behaviour of an executor returned by {@link OpenSearchExecutors#newSinglePrioritizing}.
*/ */
public class MockSinglePrioritizingExecutor extends PrioritizedEsThreadPoolExecutor { public class MockSinglePrioritizingExecutor extends PrioritizedOpenSearchThreadPoolExecutor {
public MockSinglePrioritizingExecutor(String name, DeterministicTaskQueue deterministicTaskQueue, ThreadPool threadPool) { public MockSinglePrioritizingExecutor(String name, DeterministicTaskQueue deterministicTaskQueue, ThreadPool threadPool) {
super(name, 0, 1, 0L, TimeUnit.MILLISECONDS, super(name, 0, 1, 0L, TimeUnit.MILLISECONDS,

View File

@ -29,7 +29,7 @@ import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue; import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.node.Node; import org.opensearch.node.Node;
import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool;
@ -61,8 +61,8 @@ public class FakeThreadPoolMasterService extends MasterService {
} }
@Override @Override
protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() {
return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 1, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory(name), return new PrioritizedOpenSearchThreadPoolExecutor(name, 1, 1, 1, TimeUnit.SECONDS, OpenSearchExecutors.daemonThreadFactory(name),
null, null) { null, null) {
@Override @Override

View File

@ -24,7 +24,7 @@ import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchExecutors;
import org.opensearch.common.util.concurrent.EsThreadPoolExecutor; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -38,7 +38,7 @@ import java.util.concurrent.CountDownLatch;
*/ */
public class OpenSearchIndexInputTestCase extends OpenSearchTestCase { public class OpenSearchIndexInputTestCase extends OpenSearchTestCase {
private static EsThreadPoolExecutor executor; private static OpenSearchThreadPoolExecutor executor;
@BeforeClass @BeforeClass
public static void createExecutor() { public static void createExecutor() {

View File

@ -1856,7 +1856,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file"); .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file");
if (rarely()) { if (rarely()) {
// Sometimes adjust the minimum search thread pool size, causing // Sometimes adjust the minimum search thread pool size, causing
// QueueResizingEsThreadPoolExecutor to be used instead of a regular // QueueResizingOpenSearchThreadPoolExecutor to be used instead of a regular
// fixed thread pool // fixed thread pool
builder.put("thread_pool.search.min_queue_size", 100); builder.put("thread_pool.search.min_queue_size", 100);
} }

View File

@ -19,7 +19,7 @@
package org.opensearch.cluster.coordination; package org.opensearch.cluster.coordination;
import org.opensearch.common.Priority; import org.opensearch.common.Priority;
import org.opensearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.PrioritizedRunnable; import org.opensearch.common.util.concurrent.PrioritizedRunnable;
import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTestCase;
@ -29,7 +29,7 @@ public class MockSinglePrioritizingExecutorTests extends OpenSearchTestCase {
public void testPrioritizedEsThreadPoolExecutor() { public void testPrioritizedEsThreadPoolExecutor() {
final DeterministicTaskQueue taskQueue = DeterministicTaskQueueTests.newTaskQueue(); final DeterministicTaskQueue taskQueue = DeterministicTaskQueueTests.newTaskQueue();
final PrioritizedEsThreadPoolExecutor executor = new MockSinglePrioritizingExecutor("test", taskQueue, taskQueue.getThreadPool()); final PrioritizedOpenSearchThreadPoolExecutor executor = new MockSinglePrioritizingExecutor("test", taskQueue, taskQueue.getThreadPool());
final AtomicBoolean called1 = new AtomicBoolean(); final AtomicBoolean called1 = new AtomicBoolean();
final AtomicBoolean called2 = new AtomicBoolean(); final AtomicBoolean called2 = new AtomicBoolean();
executor.execute(new PrioritizedRunnable(Priority.NORMAL) { executor.execute(new PrioritizedRunnable(Priority.NORMAL) {