[Docs] Fix common word repetitions (#39703)

Author: Christoph Büscher, 2019-04-25 20:47:03 +02:00
parent 6d7110edf5
commit 52495843cc
31 changed files with 35 additions and 35 deletions

View File

@@ -34,7 +34,7 @@ import java.util.Objects;
* Job processed record counts.
* <p>
* The getInput... methods return the actual number of
- * fields/records sent the the API including invalid records.
+ * fields/records sent the API including invalid records.
* The getProcessed... methods are the number sent to the
* Engine.
* <p>

View File

@@ -15,7 +15,7 @@ to determine if a bucket should be retained or filtered out.
==== Return
boolean::
- True if the the bucket should be retained, false if the bucket should be filtered out.
+ True if the bucket should be retained, false if the bucket should be filtered out.
==== API
@@ -78,4 +78,4 @@ GET /seats/_search
// TEST[setup:seats]
<1> The `buckets_path` points to the max aggregations (`max_cost`) and adds `max` variables
to the `params` map
- <2> The user-specified `base_cost` is also added to the `params` map
+ <2> The user-specified `base_cost` is also added to the `params` map

View File

@@ -367,7 +367,7 @@ PUT twitter/_doc/1?version=2&version_type=external
near real time aspects of search operations. If no version is provided,
then the operation is executed without any version checks.
- The above will succeed since the the supplied version of 2 is higher than
+ The above will succeed since the supplied version of 2 is higher than
the current document version of 1. If the document was already updated
and its version was set to 2 or higher, the indexing command will fail
and result in a conflict (409 http status code).
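The rule the corrected sentence states can be captured in a tiny standalone sketch (illustrative only, not Elasticsearch code): an external-versioned write is accepted only when the supplied version is strictly greater than the stored one, otherwise it fails as a conflict.

    // Illustrative sketch only -- not the Elasticsearch implementation.
    final class ExternalVersionSketch {
        static long apply(long currentVersion, long suppliedVersion) {
            if (suppliedVersion > currentVersion) {
                return suppliedVersion;            // e.g. supplied 2 > current 1: indexing succeeds
            }
            throw new IllegalStateException("version conflict -> HTTP 409");
        }

        public static void main(String[] args) {
            System.out.println(apply(1, 2));       // prints 2, mirroring the docs example above
        }
    }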

View File

@@ -227,7 +227,7 @@ information for the step that's being performed on the index.
<1> Status of the step that's in progress.
If the index is in the ERROR step, something went wrong while executing a
- step in the policy and and you will need to take action for the index to proceed
+ step in the policy and you will need to take action for the index to proceed
to the next step. To help you diagnose the problem, the explain response shows
the step that failed and the step info provides information about the error.

View File

@@ -431,7 +431,7 @@ In Elasticsearch 7.0, each API will support typeless requests,
and specifying a type will produce a deprecation warning.
NOTE: Typeless APIs work even if the target index contains a custom type.
- For example, if an index has the the custom type name `my_type`, we can add
+ For example, if an index has the custom type name `my_type`, we can add
documents to it using typeless `index` calls, and load documents with typeless
`get` calls.

View File

@@ -98,7 +98,7 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
}
/**
- * Compares the the results of running two analyzers against many random
+ * Compares the results of running two analyzers against many random
* strings. The goal is to figure out if two anlayzers are "the same" by
* comparing their results. This is far from perfect but should be fairly
* accurate, especially for gross things like missing {@code decimal_digit}

View File

@@ -182,7 +182,7 @@ public final class DissectParser {
* all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for
* (the delimiter) is generally small and rare the naive approach is efficient.
*
- * In this case the the string that is walked is the input string, and the string being searched for is the current delimiter.
+ * In this case the string that is walked is the input string, and the string being searched for is the current delimiter.
* For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the
* input string. At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered
* list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched.
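The naive delimiter walk described in this comment can be sketched in a few lines of plain Java (illustrative only, not the actual DissectParser code):

    // Illustrative sketch only -- not the actual DissectParser implementation.
    final class DelimiterWalkSketch {
        // Walk the input from 'from' and return the index where 'delimiter' next matches, or -1 if it never does.
        static int nextDelimiter(String input, String delimiter, int from) {
            for (int i = from; i <= input.length() - delimiter.length(); i++) {
                if (input.regionMatches(i, delimiter, 0, delimiter.length())) {
                    return i;
                }
            }
            return -1;
        }

        public static void main(String[] args) {
            String input = "foo,bar:baz";          // matches the dissect pattern %{a},%{b}:%{c}
            int comma = nextDelimiter(input, ",", 0);
            int colon = nextDelimiter(input, ":", comma + 1);
            System.out.println(input.substring(0, comma));          // a = foo
            System.out.println(input.substring(comma + 1, colon));  // b = bar
            System.out.println(input.substring(colon + 1));         // c = baz
        }
    }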

View File

@@ -67,7 +67,7 @@ public class DissectParserTests extends ESTestCase {
assertMatch("%{a} » %{b}»%{c}€%{d}", "foo » bar»baz€quux",
Arrays.asList("a", "b", "c", "d"), Arrays.asList("foo", "bar", "baz", "quux"));
assertMatch("%{a} %{b} %{+a}", "foo bar baz quux", Arrays.asList("a", "b"), Arrays.asList("foo baz quux", "bar"), " ");
- //Logstash supports implicit ordering based anchored by the the key without the '+'
+ //Logstash supports implicit ordering based anchored by the key without the '+'
//This implementation will only honor implicit ordering for appending right to left else explicit order (/N) is required.
//The results of this test differ from Logstash.
assertMatch("%{+a} %{a} %{+a} %{b}", "December 31 1999 quux",

View File

@@ -397,7 +397,7 @@ final class PemUtils {
* defined in RFC 1423. RFC 1423 only defines DES-CBS and triple DES (EDE) in CBC mode. AES in CBC mode is also widely used though ( 3
* different variants of 128, 192, 256 bit keys )
*
- * @param dekHeaderValue The value of the the DEK-Info PEM header
+ * @param dekHeaderValue The value of the DEK-Info PEM header
* @param password The password with which the key is encrypted
* @return a cipher of the appropriate algorithm and parameters to be used for decryption
* @throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate
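Since the javadoc only names the DEK-Info header, a tiny hypothetical sketch of what its value carries may help (not the real PemUtils code): an OpenSSL-style encrypted PEM key carries a header such as DEK-Info: AES-128-CBC,<IV in hex>, i.e. the cipher spec and the initialisation vector separated by a comma.

    // Hypothetical sketch only -- not the real PemUtils implementation.
    final class DekInfoSketch {
        // Example header line in an encrypted PEM file:
        //   DEK-Info: AES-128-CBC,37A4BD0D61A8B05CBA4B4A8AEC7B4E6F
        static String[] splitDekInfo(String dekHeaderValue) {
            String[] parts = dekHeaderValue.split(",");
            String algorithm = parts[0].trim(); // cipher, key size and mode, e.g. "AES-128-CBC"
            String ivHex = parts[1].trim();     // initialisation vector, hex encoded
            return new String[] { algorithm, ivHex };
        }
    }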

View File

@@ -92,7 +92,7 @@ enum DateFormat {
int year = LocalDate.now(ZoneOffset.UTC).getYear();
DateFormatter dateFormatter = DateFormatter.forPattern(format)
.withLocale(locale);
- // if UTC zone is set here, the the time zone specified in the format will be ignored, leading to wrong dates
+ // if UTC zone is set here, the time zone specified in the format will be ignored, leading to wrong dates
if (isUtc == false) {
dateFormatter = dateFormatter.withZone(zoneId);
}

View File

@@ -86,7 +86,7 @@ public class RankEvalRequest extends ActionRequest implements IndicesRequest.Rep
}
/**
- * Set the the specification of the ranking evaluation.
+ * Set the specification of the ranking evaluation.
*/
public void setRankEvalSpec(RankEvalSpec task) {
this.rankingEvaluationSpec = task;

View File

@@ -120,7 +120,7 @@ final class SoftDeletesPolicy {
* localCheckpointOfSafeCommit.
* - Changes APIs are driven by a combination of the global checkpoint, retention operations, and retention leases. Here we
* prefer using the global checkpoint instead of the maximum sequence number because only operations up to the global
- * checkpoint are exposed in the the changes APIs.
+ * checkpoint are exposed in the changes APIs.
*/
// calculate the minimum sequence number to retain based on retention leases
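A rough, hypothetical sketch of the calculation the trailing comment announces (the real SoftDeletesPolicy handles more cases, such as the safe-commit bound): combine the global checkpoint, the configured number of retained operations, and the smallest sequence number still claimed by a retention lease.

    // Hypothetical sketch only -- the real SoftDeletesPolicy logic is more involved.
    final class RetentionSketch {
        static long minRetainedSeqNo(long globalCheckpoint, long retentionOperations, long minLeaseRetainedSeqNo) {
            long basedOnCheckpoint = globalCheckpoint + 1 - retentionOperations; // keep the last N ops below the global checkpoint
            return Math.min(basedOnCheckpoint, minLeaseRetainedSeqNo);           // never discard what a lease still claims
        }

        public static void main(String[] args) {
            System.out.println(minRetainedSeqNo(100, 20, 95)); // 81: the checkpoint bound wins here
        }
    }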

View File

@@ -171,7 +171,7 @@ public abstract class Mapper implements ToXContentFragment, Iterable<Mapper> {
public abstract String name();
/**
- * Returns a name representing the the type of this mapper.
+ * Returns a name representing the type of this mapper.
*/
public abstract String typeName();

View File

@@ -162,7 +162,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.get().getTasks().get(0);
- // Verifying the the task runs on the new node
+ // Verifying the task runs on the new node
assertThat(taskInfo.getTaskId().getNodeId(), equalTo(newNodeId));
internalCluster().stopRandomNode(settings -> "test".equals(settings.get("node.attr.test_attr")));
@@ -202,7 +202,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.get().getTasks().get(0);
- // Verifying the the task can now be assigned
+ // Verifying the task can now be assigned
assertThat(taskInfo.getTaskId().getNodeId(), notNullValue());
// Remove the persistent task

View File

@@ -269,7 +269,7 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
newClusterState = addTask(state, "test", null, "this_node");
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
- // Check the the task is know to the task manager
+ // Check the task is know to the task manager
assertThat(taskManager.getTasks().size(), equalTo(1));
AllocatedPersistentTask runningTask = (AllocatedPersistentTask)taskManager.getTasks().values().iterator().next();
String persistentId = runningTask.getPersistentTaskId();
@@ -305,7 +305,7 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
executor.get(0).task.markAsFailed(new IOException("test"));
}
- // Check the the task is now removed from task manager
+ // Check the task is now removed from task manager
assertThat(taskManager.getTasks().values(), empty());
}

View File

@@ -99,7 +99,7 @@ are five possible modes an action can be associated with:
send it. In this mode, the action might be throttled if the
current state of the watch indicates it should be.
- | `force_simulate` | Similar to the the `simulate` mode, except the action is
+ | `force_simulate` | Similar to the `simulate` mode, except the action is
not be throttled even if the current state of the watch
indicates it should be.

View File

@@ -89,7 +89,7 @@ the following table:
{es} offers a number of algorithms for securely hashing credentials in memory and
on disk. However, only the `PBKDF2` family of algorithms is compliant with FIPS
- 140-2 for password hashing. You must set the the `cache.hash_algo` realm settings
+ 140-2 for password hashing. You must set the `cache.hash_algo` realm settings
and the `xpack.security.authc.password_hashing.algorithm` setting to one of the
available `PBKDF2` values.
See <<hashing-settings>>.

View File

@@ -68,7 +68,7 @@ public class CcrRepositoryRetentionLeaseTests extends ESTestCase {
final String retentionLeaseId =
retentionLeaseId("local-cluster", followerShardId.getIndex(), "remote-cluster", leaderShardId.getIndex());
- // simulate that the the retention lease already exists on the leader, and verify that we attempt to renew it
+ // simulate that the retention lease already exists on the leader, and verify that we attempt to renew it
final Client remoteClient = mock(Client.class);
final ArgumentCaptor<RetentionLeaseActions.AddRequest> addRequestCaptor =
ArgumentCaptor.forClass(RetentionLeaseActions.AddRequest.class);
@@ -133,7 +133,7 @@ public class CcrRepositoryRetentionLeaseTests extends ESTestCase {
final String retentionLeaseId =
retentionLeaseId("local-cluster", followerShardId.getIndex(), "remote-cluster", leaderShardId.getIndex());
- // simulate that the the retention lease already exists on the leader, expires before we renew, and verify that we attempt to add it
+ // simulate that the retention lease already exists on the leader, expires before we renew, and verify that we attempt to add it
final Client remoteClient = mock(Client.class);
final ArgumentCaptor<RetentionLeaseActions.AddRequest> addRequestCaptor =
ArgumentCaptor.forClass(RetentionLeaseActions.AddRequest.class);

View File

@@ -45,7 +45,7 @@ import java.util.function.Supplier;
* <p>
* This is a filter snapshot repository that only snapshots the minimal required information
* that is needed to recreate the index. In other words instead of snapshotting the entire shard
- * with all it's lucene indexed fields, doc values, points etc. it only snapshots the the stored
+ * with all it's lucene indexed fields, doc values, points etc. it only snapshots the stored
* fields including _source and _routing as well as the live docs in oder to distinguish between
* live and deleted docs.
* </p>

View File

@@ -24,7 +24,7 @@ import java.util.Objects;
* Job processed record counts.
* <p>
* The getInput... methods return the actual number of
- * fields/records sent the the API including invalid records.
+ * fields/records sent the API including invalid records.
* The getProcessed... methods are the number sent to the
* Engine.
* <p>

View File

@@ -197,7 +197,7 @@ public class SchedulerEngine {
} catch (final Throwable t) {
/*
* Allowing the throwable to escape here will lead to be it being caught in FutureTask#run and set as the outcome of this
- * task; however, we never inspect the the outcomes of these scheduled tasks and so allowing the throwable to escape
+ * task; however, we never inspect the outcomes of these scheduled tasks and so allowing the throwable to escape
* unhandled here could lead to us losing fatal errors. Instead, we rely on ExceptionsHelper#maybeDieOnAnotherThread to
* appropriately dispatch any error to the uncaught exception handler. We should never see an exception here as these do
* not escape from SchedulerEngine#notifyListeners.
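The failure mode this comment guards against can be reproduced with a plain ScheduledExecutorService (standalone sketch, not SchedulerEngine code): a throwable escaping a scheduled task becomes the outcome of its FutureTask, and if that future is never inspected the error is silently lost.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Standalone sketch of the failure mode described above -- not SchedulerEngine code.
    public class LostErrorSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            Runnable failing = () -> { throw new AssertionError("fatal error nobody ever sees"); };
            scheduler.schedule(failing, 10, TimeUnit.MILLISECONDS); // returned future is ignored
            Thread.sleep(100);      // the AssertionError has been captured by FutureTask#run...
            scheduler.shutdown();   // ...and is lost, because no one ever calls get() on the future
        }
    }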

View File

@@ -277,7 +277,7 @@ public final class IndicesPermission {
private final Set<BytesReference> query;
// by default certain restricted indices are exempted when granting privileges, as they should generally be hidden for ordinary
// users. Setting this flag true eliminates the special status for the purpose of this permission - restricted indices still have
- // to be covered by the the "indices"
+ // to be covered by the "indices"
private final boolean allowRestrictedIndices;
public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable Set<BytesReference> query,

View File

@@ -374,7 +374,7 @@ public class PemUtils {
* defined in RFC 1423. RFC 1423 only defines DES-CBS and triple DES (EDE) in CBC mode. AES in CBC mode is also widely used though ( 3
* different variants of 128, 192, 256 bit keys )
*
- * @param dekHeaderValue The value of the the DEK-Info PEM header
+ * @param dekHeaderValue The value of the DEK-Info PEM header
* @param password The password with which the key is encrypted
* @return a cipher of the appropriate algorithm and parameters to be used for decryption
* @throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate

View File

@@ -87,7 +87,7 @@ public class PermissionsIT extends ESRestTestCase {
/**
* Tests that a policy that simply deletes an index after 0s succeeds when an index
* with user `test_admin` is created referencing a policy created by `test_ilm` when both
- * users have read/write permissions on the the index. The goal is to verify that one
+ * users have read/write permissions on the index. The goal is to verify that one
* does not need to be the same user who created both the policy and the index to have the
* index be properly managed by ILM.
*/

View File

@@ -43,7 +43,7 @@ public class TypedChainTaskExecutor<T> {
* {@code true} means continue on to the next task.
* Must be able to handle null values.
* @param failureShortCircuitPredicate The predicate on whether to short circuit execution on a give exception.
- * {@code true} means that no more tasks should execute and the the listener::onFailure should be
+ * {@code true} means that no more tasks should execute and the listener::onFailure should be
* called.
*/
public TypedChainTaskExecutor(ExecutorService executorService,
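A hypothetical, synchronous sketch of how the two predicates described in this javadoc interact (the real TypedChainTaskExecutor is asynchronous and listener-based): the continuation predicate inspects each, possibly null, result and decides whether to run the next task, while the failure predicate decides whether an exception aborts the whole chain.

    import java.util.List;
    import java.util.function.Predicate;
    import java.util.function.Supplier;

    // Hypothetical synchronous sketch -- not the real, listener-based class.
    final class ChainSketch {
        static <T> void run(List<Supplier<T>> tasks,
                            Predicate<T> continuationPredicate,
                            Predicate<Exception> failureShortCircuitPredicate) {
            for (Supplier<T> task : tasks) {
                try {
                    T result = task.get();                         // may be null
                    if (continuationPredicate.test(result) == false) {
                        return;                                    // stop quietly, skip remaining tasks
                    }
                } catch (Exception e) {
                    if (failureShortCircuitPredicate.test(e)) {
                        throw new RuntimeException(e);             // stand-in for listener::onFailure
                    }
                    // otherwise tolerate the failure and continue with the next task
                }
            }
        }
    }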

View File

@@ -233,7 +233,7 @@ public class MonitoringService extends AbstractLifecycleComponent {
final Collection<MonitoringDoc> results = new ArrayList<>();
for (Collector collector : collectors) {
if (isStarted() == false) {
- // Do not collect more data if the the monitoring service is stopping
+ // Do not collect more data if the monitoring service is stopping
// otherwise some collectors might just fail.
return;
}

View File

@@ -19,7 +19,7 @@ import java.util.List;
* <p>
* By telling the {@code MultiHttpResource} to become dirty, it effectively marks all of its sub-resources dirty as well.
* <p>
- * Sub-resources should be the sole responsibility of the the {@code MultiHttpResource}; there should not be something using them directly
+ * Sub-resources should be the sole responsibility of the {@code MultiHttpResource}; there should not be something using them directly
* if they are included in a {@code MultiHttpResource}.
*/
public class MultiHttpResource extends HttpResource {
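The relationship described above is essentially the composite pattern; a tiny hypothetical sketch (not the real HttpResource classes) shows the cascading dirty flag:

    import java.util.List;

    // Tiny hypothetical sketch of the cascading dirty flag -- not the real HttpResource hierarchy.
    class ResourceSketch {
        protected boolean dirty = true;
        void markDirty() { dirty = true; }
        boolean isDirty() { return dirty; }
    }

    class MultiResourceSketch extends ResourceSketch {
        private final List<ResourceSketch> subResources;
        MultiResourceSketch(List<ResourceSketch> subResources) { this.subResources = subResources; }
        @Override
        void markDirty() {
            super.markDirty();
            subResources.forEach(ResourceSketch::markDirty); // dirtiness cascades to every sub-resource
        }
    }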

View File

@@ -349,7 +349,7 @@ public final class TokenService {
}
/**
- * Gets the UserToken with given id by fetching the the corresponding token document
+ * Gets the UserToken with given id by fetching the corresponding token document
*/
void getUserTokenFromId(String userTokenId, ActionListener<UserToken> listener) {
if (securityIndex.isAvailable() == false) {

View File

@@ -73,7 +73,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
/**
- * Manages the lifecycle of a single index, mapping and and data upgrades/migrations.
+ * Manages the lifecycle of a single index, mapping and data upgrades/migrations.
*/
public class SecurityIndexManager implements ClusterStateListener {

View File

@@ -61,7 +61,7 @@ public class TransportGetWatchAction extends WatcherTransportAction<GetWatchRequ
try (XContentBuilder builder = jsonBuilder()) {
// When we return the watch via the Get Watch REST API, we want to return the watch as was specified in
// the put api, we don't include the status in the watch source itself, but as a separate top level field,
- // so that it indicates the the status is managed by watcher itself.
+ // so that it indicates the status is managed by watcher itself.
ZonedDateTime now = clock.instant().atZone(ZoneOffset.UTC);
Watch watch = parser.parseWithSecrets(request.getId(), true, getResponse.getSourceAsBytesRef(), now,
XContentType.JSON, getResponse.getSeqNo(), getResponse.getPrimaryTerm());

View File

@@ -179,7 +179,7 @@ public class HttpSecretsIntegrationTests extends AbstractWatcherIntegrationTestC
.get();
// verifying the basic auth password is stored encrypted in the index when security
- // is enabled, when it's not enabled, the the passowrd should be stored in plain text
+ // is enabled, when it's not enabled, the password should be stored in plain text
GetResponse response = client().prepareGet().setIndex(Watch.INDEX).setId("_id").get();
assertThat(response, notNullValue());
assertThat(response.getId(), is("_id"));