Fix a variety of typos and misspelled words (#32792)
This commit is contained in:
parent ee21067a41
commit d45fe43a68
@@ -320,7 +320,7 @@ have to test Elasticsearch.
 #### Configurations

 Gradle organizes dependencies and build artifacts into "configurations" and
-allows you to use these configurations arbitrarilly. Here are some of the most
+allows you to use these configurations arbitrarily. Here are some of the most
 common configurations in our build and how we use them:

 <dl>
@@ -250,7 +250,7 @@ Pass arbitrary jvm arguments.

 Running backwards compatibility tests is disabled by default since it
 requires a release version of elasticsearch to be present on the test system.
-To run backwards compatibilty tests untar or unzip a release and run the tests
+To run backwards compatibility tests untar or unzip a release and run the tests
 with the following command:

 ---------------------------------------------------------------------------
@@ -122,7 +122,7 @@ class VersionCollection {
 if (isReleased(version) == false) {
 // caveat 1 - This should only ever contain 2 non released branches in flight. An example is 6.x is frozen,
 // and 6.2 is cut but not yet released there is some simple logic to make sure that in the case of more than 2,
-// it will bail. The order is that the minor snapshot is fufilled first, and then the staged minor snapshot
+// it will bail. The order is that the minor snapshot is fulfilled first, and then the staged minor snapshot
 if (nextMinorSnapshot == null) {
 // it has not been set yet
 nextMinorSnapshot = replaceAsSnapshot(version)
@@ -72,7 +72,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {

 /**
 * Root directory containing all the files generated by this task. It is
-* contained withing testRoot.
+* contained within testRoot.
 */
 File outputRoot() {
 return new File(testRoot, '/rest-api-spec/test')
@@ -337,7 +337,7 @@ class NodeInfo {
 case 'deb':
 return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
 default:
-throw new InvalidUserDataException("Unkown distribution: ${distro}")
+throw new InvalidUserDataException("Unknown distribution: ${distro}")
 }
 }
 }
@@ -22,7 +22,7 @@ task sample {
 // dependsOn buildResources.outputDir
 // for now it's just
 dependsOn buildResources
-// we have to refference it at configuration time in order to be picked up
+// we have to reference it at configuration time in order to be picked up
 ext.checkstyle_suppressions = buildResources.copy('checkstyle_suppressions.xml')
 doLast {
 println "This task is using ${file(checkstyle_suppressions)}"
@@ -215,7 +215,7 @@ public class Detector implements ToXContentObject {
 }

 /**
-* Excludes frequently-occuring metrics from the analysis;
+* Excludes frequently-occurring metrics from the analysis;
 * can apply to 'by' field, 'over' field, or both
 *
 * @return the value that the user set
@@ -228,7 +228,7 @@ final class JvmOptionsParser {
 // no range is present, apply the JVM option to the specified major version only
 upper = lower;
 } else if (end == null) {
-// a range of the form \\d+- is present, apply the JVM option to all major versions larger than the specifed one
+// a range of the form \\d+- is present, apply the JVM option to all major versions larger than the specified one
 upper = Integer.MAX_VALUE;
 } else {
 // a range of the form \\d+-\\d+ is present, apply the JVM option to the specified range of major versions
@@ -307,7 +307,7 @@ You can also customize the response consumer used to buffer the asynchronous
 responses. The default consumer will buffer up to 100MB of response on the
 JVM heap. If the response is larger then the request will fail. You could,
 for example, lower the maximum size which might be useful if you are running
-in a heap constrained environment like the exmaple above.
+in a heap constrained environment like the example above.

 Once you've created the singleton you can use it when making requests:

@@ -3,7 +3,7 @@

 Painless has a strict whitelist for methods and classes to ensure all
 painless scripts are secure. Most of these methods are exposed directly
-from the Java Runtime Enviroment (JRE) while others are part of
+from the Java Runtime Environment (JRE) while others are part of
 Elasticsearch or Painless itself. Below is a list of all available
 classes grouped with their respected methods. Clicking on the method
 name takes you to the documentation for that specific method. Methods
@@ -32,7 +32,7 @@ PUT _snapshot/my_hdfs_repository
 "type": "hdfs",
 "settings": {
 "uri": "hdfs://namenode:8020/",
-"path": "elasticsearch/respositories/my_hdfs_repository",
+"path": "elasticsearch/repositories/my_hdfs_repository",
 "conf.dfs.client.read.shortcircuit": "true"
 }
 }
@@ -149,7 +149,7 @@ PUT _snapshot/my_hdfs_repository
 "type": "hdfs",
 "settings": {
 "uri": "hdfs://namenode:8020/",
-"path": "/user/elasticsearch/respositories/my_hdfs_repository",
+"path": "/user/elasticsearch/repositories/my_hdfs_repository",
 "security.principal": "elasticsearch@REALM"
 }
 }
@@ -167,7 +167,7 @@ PUT _snapshot/my_hdfs_repository
 "type": "hdfs",
 "settings": {
 "uri": "hdfs://namenode:8020/",
-"path": "/user/elasticsearch/respositories/my_hdfs_repository",
+"path": "/user/elasticsearch/repositories/my_hdfs_repository",
 "security.principal": "elasticsearch/_HOST@REALM"
 }
 }
@@ -72,7 +72,7 @@ parameter or in the `filename` field in an input YAML file.
 You can optionally provide IP addresses or DNS names for each instance. If
 neither IP addresses nor DNS names are specified, the Elastic stack products
 cannot perform hostname verification and you might need to configure the
-`verfication_mode` security setting to `certificate` only. For more information
+`verification_mode` security setting to `certificate` only. For more information
 about this setting, see <<security-settings>>.

 All certificates that are generated by this command are signed by a CA. You can
@@ -207,7 +207,7 @@ The `forecasts_stats` object shows statistics about forecasts. It has the follow
 (object) Counts per forecast status, for example: {"finished" : 2}.

 NOTE: `memory_bytes`, `records`, `processing_time_ms` and `status` require at least 1 forecast, otherwise
-these fields are ommitted.
+these fields are omitted.

 [float]
 [[ml-stats-node]]
@@ -45,7 +45,7 @@ For more information, see
 ==== Examples

 Imagine we have an index named `sensor-1` full of raw data. We know that the data will grow over time, so there
-will be a `sensor-2`, `sensor-3`, etc. Let's create a Rollup job that targets the index pattern `sensor-*` to accomodate
+will be a `sensor-2`, `sensor-3`, etc. Let's create a Rollup job that targets the index pattern `sensor-*` to accommodate
 this future scaling:

 [source,js]
@@ -44,7 +44,7 @@ from `artifacts.elastic.co/maven` by adding it to the repositories list:
 === Setup

 The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`.
-Note the driver implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registerd automatically
+Note the driver implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registered automatically
 as long as its available in the classpath.

 Once registered, the driver understands the following syntax as an URL:
@@ -143,7 +143,7 @@
 * described by later documentation.
 * <p>
 * Storebable nodes have three methods for writing -- setup, load, and store. These methods
-* are used in conjuction with a parent node aware of the storeable node (lhs) that has a node
+* are used in conjunction with a parent node aware of the storeable node (lhs) that has a node
 * representing a value to store (rhs). The setup method is always once called before a store
 * to give storeable nodes a chance to write any prefixes they may have and any values such as
 * array indices before the store happens. Load is called on a storeable node that must also
@@ -152,7 +152,7 @@
 * Sub nodes are partial nodes that require a parent to work correctly. These nodes can really
 * represent anything the parent node would like to split up into logical pieces and don't really
 * have any distinct set of rules. The currently existing subnodes all have ANode as a super class
-* somewhere in their class heirachy so the parent node can defer some analysis and writing to
+* somewhere in their class hierarchy so the parent node can defer some analysis and writing to
 * the sub node.
 */
 package org.elasticsearch.painless.node;
@@ -434,7 +434,7 @@ public class PainlessDocGenerator {
 if (classPackage.startsWith("org.apache.lucene")) {
 return "lucene-core";
 }
-throw new IllegalArgumentException("Unrecognized packge: " + classPackage);
+throw new IllegalArgumentException("Unrecognized package: " + classPackage);
 }

 private static void emitGeneratedWarning(PrintStream stream) {
@@ -83,7 +83,7 @@ public class PainlessExecuteRequestTests extends AbstractStreamableXContentTestC
 QueryBuilder query = randomBoolean() ? new MatchAllQueryBuilder() : null;
 // TODO: pass down XContextType to createTestInstance() method.
 // otherwise the document itself is different causing test failures.
-// This should be done in a seperate change as the test instance is created before xcontent type is randomly picked and
+// This should be done in a separate change as the test instance is created before xcontent type is randomly picked and
 // all the createTestInstance() methods need to be changed, which will make this a big chnage
 // BytesReference doc = randomBoolean() ? new BytesArray("{}") : null;
 BytesReference doc = null;
@@ -42,8 +42,8 @@ import java.util.stream.Collectors;

 /**
 * Returns the results for a {@link RankEvalRequest}.<br>
-* The repsonse contains a detailed section for each evaluation query in the request and
-* possible failures that happened when executin individual queries.
+* The response contains a detailed section for each evaluation query in the request and
+* possible failures that happened when execution individual queries.
 **/
 public class RankEvalResponse extends ActionResponse implements ToXContentObject {

@@ -481,7 +481,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {

 /**
 * Execute a bulk retry test case. The total number of failures is random and the number of retries attempted is set to
-* testRequest.getMaxRetries and controled by the failWithRejection parameter.
+* testRequest.getMaxRetries and controlled by the failWithRejection parameter.
 */
 private void bulkRetryTestCase(boolean failWithRejection) throws Exception {
 int totalFailures = randomIntBetween(1, testRequest.getMaxRetries());
@@ -122,7 +122,7 @@ public class CancelTests extends ReindexTestCase {
 logger.debug("waiting for updates to be blocked");
 boolean blocked = awaitBusy(
 () -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0,
-1, TimeUnit.MINUTES); // 10 seconds is usually fine but on heavilly loaded machines this can wake a while
+1, TimeUnit.MINUTES); // 10 seconds is usually fine but on heavily loaded machines this can take a while
 assertTrue("updates blocked", blocked);

 // Status should show the task running
@@ -60,7 +60,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 String index = "foo";
 String type = "mytype";

-String[] equilavent = {"I WİLL USE TURKİSH CASING", "ı will use turkish casıng"};
+String[] equivalent = {"I WİLL USE TURKİSH CASING", "ı will use turkish casıng"};

 XContentBuilder builder = jsonBuilder()
 .startObject().startObject("properties")
@@ -75,8 +75,8 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {

 // both values should collate to same value
 indexRandom(true,
-client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
-client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
 );

 // searching for either of the terms should return both results since they collate to the same value
@@ -85,7 +85,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 .types(type)
 .source(new SearchSourceBuilder()
 .fetchSource(false)
-.query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+.query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
 .sort("collate")
 .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
 );
@@ -100,7 +100,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 String index = "foo";
 String type = "mytype";

-String[] equilavent = {"a", "C", "a", "B"};
+String[] equivalent = {"a", "C", "a", "B"};

 XContentBuilder builder = jsonBuilder()
 .startObject().startObject("properties")
@@ -114,9 +114,9 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {

 // everything should be indexed fine, no exceptions
 indexRandom(true,
-client().prepareIndex(index, type, "1").setSource("{\"collate\":[\"" + equilavent[0] + "\", \""
-+ equilavent[1] + "\"]}", XContentType.JSON),
-client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[2] + "\"}", XContentType.JSON)
+client().prepareIndex(index, type, "1").setSource("{\"collate\":[\"" + equivalent[0] + "\", \""
++ equivalent[1] + "\"]}", XContentType.JSON),
+client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON)
 );

 // using sort mode = max, values B and C will be used for the sort
@@ -161,7 +161,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 String index = "foo";
 String type = "mytype";

-String[] equilavent = {"I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng"};
+String[] equivalent = {"I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng"};

 XContentBuilder builder = jsonBuilder()
 .startObject().startObject("properties")
@@ -176,8 +176,8 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));

 indexRandom(true,
-client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
-client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
 );

 // searching for either of the terms should return both results since they collate to the same value
@@ -186,7 +186,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 .types(type)
 .source(new SearchSourceBuilder()
 .fetchSource(false)
-.query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+.query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
 .sort("collate")
 .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
 );
@@ -204,7 +204,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 String index = "foo";
 String type = "mytype";

-String[] equilavent = {"TESTING", "testing"};
+String[] equivalent = {"TESTING", "testing"};

 XContentBuilder builder = jsonBuilder()
 .startObject().startObject("properties")
@@ -219,8 +219,8 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));

 indexRandom(true,
-client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
-client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
 );

 SearchRequest request = new SearchRequest()
@@ -228,7 +228,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 .types(type)
 .source(new SearchSourceBuilder()
 .fetchSource(false)
-.query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+.query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
 .sort("collate")
 .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
 );
@@ -247,7 +247,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 String index = "foo";
 String type = "mytype";

-String[] equilavent = {"foo-bar", "foo bar"};
+String[] equivalent = {"foo-bar", "foo bar"};

 XContentBuilder builder = jsonBuilder()
 .startObject().startObject("properties")
@@ -262,8 +262,8 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));

 indexRandom(true,
-client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
-client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
 );

 SearchRequest request = new SearchRequest()
@@ -271,7 +271,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 .types(type)
 .source(new SearchSourceBuilder()
 .fetchSource(false)
-.query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+.query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
 .sort("collate")
 .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
 );
@@ -467,7 +467,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 RuleBasedCollator tailoredCollator = new RuleBasedCollator(baseCollator.getRules() + DIN5007_2_tailorings);
 String tailoredRules = tailoredCollator.getRules();

-String[] equilavent = {"Töne", "Toene"};
+String[] equivalent = {"Töne", "Toene"};

 XContentBuilder builder = jsonBuilder()
 .startObject().startObject("properties")
@@ -481,8 +481,8 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));

 indexRandom(true,
-client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
-client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
 );

 SearchRequest request = new SearchRequest()
@@ -490,7 +490,7 @@ public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
 .types(type)
 .source(new SearchSourceBuilder()
 .fetchSource(false)
-.query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+.query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
 .sort("collate", SortOrder.ASC)
 .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
 );
@@ -1,7 +1,7 @@
 # tests that the custom suggester works

 # the issue that prompted serializing Suggestion as a registered named writeable was not revealed until
-# a user found that it would fail when reducing suggestions in a multi node envrionment
+# a user found that it would fail when reducing suggestions in a multi node environment
 # https://github.com/elastic/elasticsearch/issues/26585
 "test custom suggester":
 - do:
@@ -748,7 +748,7 @@ os_parsers:
 # possibility of false positive when different marketing names share same NT kernel
 # e.g. windows server 2003 and windows xp
 # lots of ua strings have Windows NT 4.1 !?!?!?!? !?!? !? !????!?! !!! ??? !?!?! ?
-# (very) roughly ordered in terms of frequency of occurence of regex (win xp currently most frequent, etc)
+# (very) roughly ordered in terms of frequency of occurrence of regex (win xp currently most frequent, etc)
 ##########

 # ie mobile desktop mode
@@ -2848,7 +2848,7 @@ device_parsers:
 device_replacement: 'Micromax $1'
 brand_replacement: 'Micromax'
 model_replacement: '$1'
-# be carefull here with Acer e.g. A500
+# be careful here with Acer e.g. A500
 - regex: '; *(A\d{2}|A[12]\d{2}|A90S|A110Q) Build'
 regex_flag: 'i'
 device_replacement: 'Micromax $1'
@@ -1,5 +1,5 @@
 ---
-"Create things in the cluster state that we'll validate are there after the ugprade":
+"Create things in the cluster state that we'll validate are there after the upgrade":
 - do:
 snapshot.create_repository:
 repository: my_repo
@@ -190,7 +190,7 @@ setup() {
 @test "[SYSTEMD] start Elasticsearch with custom JVM options" {
 assert_file_exist $ESENVFILE
 # The custom config directory is not under /tmp or /var/tmp because
-# systemd's private temp directory functionaly means different
+# systemd's private temp directory functionally means different
 # processes can have different views of what's in these directories
 local temp=`mktemp -p /etc -d`
 cp "$ESCONFIG"/elasticsearch.yml "$temp"
@@ -97,7 +97,7 @@ fi

 rm -rf "$ESPLUGINS"
 # The custom plugins directory is not under /tmp or /var/tmp because
-# systemd's private temp directory functionaly means different
+# systemd's private temp directory functionally means different
 # processes can have different views of what's in these directories
 local es_plugins=$(mktemp -p /var -d -t 'plugins.XXXX')
 chown -R elasticsearch:elasticsearch "$es_plugins"
@@ -556,7 +556,7 @@ run_elasticsearch_tests() {
 move_config() {
 local oldConfig="$ESCONFIG"
 # The custom config directory is not under /tmp or /var/tmp because
-# systemd's private temp directory functionaly means different
+# systemd's private temp directory functionally means different
 # processes can have different views of what's in these directories
 export ESCONFIG="${1:-$(mktemp -p /etc -d -t 'config.XXXX')}"
 echo "Moving configuration directory from $oldConfig to $ESCONFIG"
@@ -328,7 +328,7 @@ public final class SearchPhaseController extends AbstractComponent {
 SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
 if (searchResultProvider == null) {
 // this can happen if we are hitting a shard failure during the fetch phase
-// in this case we referenced the shard result via teh ScoreDoc but never got a
+// in this case we referenced the shard result via the ScoreDoc but never got a
 // result from fetch.
 // TODO it would be nice to assert this in the future
 continue;
@@ -380,7 +380,7 @@ public final class SearchPhaseController extends AbstractComponent {
 SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
 if (fetchResultProvider == null) {
 // this can happen if we are hitting a shard failure during the fetch phase
-// in this case we referenced the shard result via teh ScoreDoc but never got a
+// in this case we referenced the shard result via the ScoreDoc but never got a
 // result from fetch.
 // TODO it would be nice to assert this in the future
 continue;
@@ -84,7 +84,7 @@ final class Natives {
 */
 static String getShortPathName(final String path) {
 if (!JNA_AVAILABLE) {
-logger.warn("cannot obtain short path for [{}] because JNA is not avilable", path);
+logger.warn("cannot obtain short path for [{}] because JNA is not available", path);
 return path;
 }
 return JNANatives.getShortPathName(path);
@@ -123,7 +123,7 @@ final class Natives {

 static void trySetMaxSizeVirtualMemory() {
 if (!JNA_AVAILABLE) {
-logger.warn("cannot getrlimit RLIMIT_AS beacuse JNA is not available");
+logger.warn("cannot getrlimit RLIMIT_AS because JNA is not available");
 return;
 }
 JNANatives.trySetMaxSizeVirtualMemory();
@@ -367,7 +367,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
 }
 aliases.add(alias);
 } else {
-// If not, we have a non required alias for this index - no futher checking needed
+// If not, we have a non required alias for this index - no further checking needed
 return null;
 }
 }
@@ -135,11 +135,11 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
 private void checkMappingsCompatibility(IndexMetaData indexMetaData) {
 try {

-// We cannot instantiate real analysis server or similiarity service at this point because the node
+// We cannot instantiate real analysis server or similarity service at this point because the node
 // might not have been started yet. However, we don't really need real analyzers or similarities at
 // this stage - so we can fake it using constant maps accepting every key.
 // This is ok because all used similarities and analyzers for this index were known before the upgrade.
-// Missing analyzers and similarities plugin will still trigger the apropriate error during the
+// Missing analyzers and similarities plugin will still trigger the appropriate error during the
 // actual upgrade.

 IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
@@ -200,7 +200,7 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster
 if (anyUpgradeFailed.get()) {
 logger.info("Templates were partially upgraded to version {}", Version.CURRENT);
 } else {
-logger.info("Templates were upgraded successfuly to version {}", Version.CURRENT);
+logger.info("Templates were upgraded successfully to version {}", Version.CURRENT);
 }
 // Check upgraders are satisfied after the update completed. If they still
 // report that changes are required, this might indicate a bug or that something
@@ -265,7 +265,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

 /**
 * Returns an iterator over active and initializing shards, ordered by the adaptive replica
-* selection forumla. Making sure though that its random within the active shards of the same
+* selection formula. Making sure though that its random within the active shards of the same
 * (or missing) rank, and initializing shards are the last to iterate through.
 */
 public ShardIterator activeInitializingShardsRankedIt(@Nullable ResponseCollectorService collector,
@@ -51,7 +51,7 @@ public interface DateMathParser {
 *
 * Examples are
 *
-* <code>2014-11-18||-2y</code> substracts two years from the input date
+* <code>2014-11-18||-2y</code> subtracts two years from the input date
 * <code>now/m</code> rounds the current time to minute granularity
 *
 * Supported rounding units are
@@ -41,7 +41,7 @@ public class PreConfiguredCharFilter extends PreConfiguredAnalysisComponent<Char
 }

 /**
-* Create a pre-configured char filter that may not vary at all, provide access to the elasticsearch verison
+* Create a pre-configured char filter that may not vary at all, provide access to the elasticsearch version
 */
 public static PreConfiguredCharFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries,
 BiFunction<Reader, org.elasticsearch.Version, Reader> create) {
@@ -150,7 +150,7 @@ public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScr
 e = addValidationError("stored_fields is not supported in this context", e);
 }
 if (maxRetries < 0) {
-e = addValidationError("retries cannnot be negative", e);
+e = addValidationError("retries cannot be negative", e);
 }
 if (false == (size == -1 || size > 0)) {
 e = addValidationError(
@@ -791,7 +791,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
 + "]";
 ensureWriteAllowed(origin);
 // When there is a single type, the unique identifier is only composed of the _id,
-// so there is no way to differenciate foo#1 from bar#1. This is especially an issue
+// so there is no way to differentiate foo#1 from bar#1. This is especially an issue
 // if a user first deletes foo#1 and then indexes bar#1: since we do not encode the
 // _type in the uid it might look like we are reindexing the same document, which
 // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with.
@@ -1047,7 +1047,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 }

 /**
-* returns the history uuid the store points at, or null if not existant.
+* returns the history uuid the store points at, or null if nonexistent.
 */
 public String getHistoryUUID() {
 return commitUserData.get(Engine.HISTORY_UUID_KEY);
@@ -85,12 +85,12 @@ public class TranslogStats implements Streamable, ToXContentFragment {
 return numberOfOperations;
 }

-/** the size of the generations in the translog that weren't yet to comitted to lucene */
+/** the size of the generations in the translog that weren't yet to committed to lucene */
 public long getUncommittedSizeInBytes() {
 return uncommittedSizeInBytes;
 }

-/** the number of operations in generations of the translog that weren't yet to comitted to lucene */
+/** the number of operations in generations of the translog that weren't yet to committed to lucene */
 public int getUncommittedOperations() {
 return uncommittedOperations;
 }
@@ -50,7 +50,7 @@ public class FilterAggregatorFactory extends AggregatorFactory<FilterAggregatorF
 * Returns the {@link Weight} for this filter aggregation, creating it if
 * necessary. This is done lazily so that the {@link Weight} is only created
 * if the aggregation collects documents reducing the overhead of the
-* aggregation in teh case where no documents are collected.
+* aggregation in the case where no documents are collected.
 *
 * Note that as aggregations are initialsed and executed in a serial manner,
 * no concurrency considerations are necessary here.
@@ -53,7 +53,7 @@ public final class FragmentBuilderHelper {
 if (!fragInfo.getSubInfos().isEmpty() && containsBrokenAnalysis(fieldType.indexAnalyzer())) {
 /* This is a special case where broken analysis like WDF is used for term-vector creation at index-time
 * which can potentially mess up the offsets. To prevent a SAIIOBException we need to resort
-* the fragments based on their offsets rather than using soley the positions as it is done in
+* the fragments based on their offsets rather than using solely the positions as it is done in
 * the FastVectorHighlighter. Yet, this is really a lucene problem and should be fixed in lucene rather
 * than in this hack... aka. "we are are working on in!" */
 final List<SubInfo> subInfos = fragInfo.getSubInfos();
@@ -150,7 +150,7 @@ public interface Transport extends LifecycleComponent {
 }

 /**
-* This class represents a response context that encapsulates the actual response handler, the action and the conneciton it was
+* This class represents a response context that encapsulates the actual response handler, the action and the connection it was
 * executed on.
 */
 final class ResponseContext<T extends TransportResponse> {
@@ -149,7 +149,7 @@ public class BulkRequestTests extends ESTestCase {
 IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
 () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON));
 assertThat(exc.getMessage(),
-containsString("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"));
+containsString("Malformed action/metadata line [5], expected a simple value for field [_unknown] but found [START_ARRAY]"));
 }

 public void testSimpleBulk8() throws Exception {
@@ -1020,7 +1020,7 @@ public class TransportReplicationActionTests extends ESTestCase {
 // publish a new state (same as the old state with the version incremented)
 setState(clusterService, stateWithNodes);

-// Assert that the request was retried, this time successfull
+// Assert that the request was retried, this time successful
 assertTrue("action should have been successfully called on retry but was not", calledSuccessfully.get());
 transportService.stop();
 }
@@ -599,7 +599,7 @@ public class OperationRoutingTests extends ESTestCase{
 collector.addNodeStatistics("node_1", 4, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
 groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
 shardChoice = groupIterator.get(0).nextOrNull();
-// finally, node 2 is choosen instead
+// finally, node 2 is chosen instead
 assertThat(shardChoice.currentNodeId(), equalTo("node_2"));

 IOUtils.close(clusterService);
@@ -63,7 +63,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
 "node2").build());
 RoutingTable routingTable = state.routingTable();

-// we can initally only allocate on node2
+// we can initially only allocate on node2
 assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
 assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node2");
 routingTable = service.applyFailedShard(state, routingTable.index("idx").shard(0).shards().get(0), randomBoolean()).routingTable();
@@ -37,7 +37,7 @@ public class MultiFieldCopyToMapperTests extends ESTestCase {
 public void testExceptionForCopyToInMultiFields() throws IOException {
 XContentBuilder mapping = createMappinmgWithCopyToInMultiField();

-// first check that for newer versions we throw exception if copy_to is found withing multi field
+// first check that for newer versions we throw exception if copy_to is found within multi field
 MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "test");
 try {
 mapperService.parse("type", new CompressedXContent(Strings.toString(mapping)), true);
@@ -579,7 +579,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
 // Generate a couple of segments
 client().prepareIndex("test", "_doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
 .setRefreshPolicy(IMMEDIATE).get();
-// Use routing so 2 documents are guarenteed to be on the same shard
+// Use routing so 2 documents are guaranteed to be on the same shard
 String routing = randomAlphaOfLength(5);
 client().prepareIndex("test", "_doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
 .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
@@ -2323,7 +2323,7 @@ public class TranslogTests extends ESTestCase {
 @Override
 void deleteReaderFiles(TranslogReader reader) {
 if (fail.fail()) {
-// simulate going OOM and dieing just at the wrong moment.
+// simulate going OOM and dying just at the wrong moment.
 throw new RuntimeException("simulated");
 } else {
 super.deleteReaderFiles(reader);
@@ -69,7 +69,7 @@ public class ProcessProbeTests extends ESTestCase {

 ProcessStats.Mem mem = stats.getMem();
 assertNotNull(mem);
-// Commited total virtual memory can return -1 if not supported, let's see which platforms fail
+// Committed total virtual memory can return -1 if not supported, let's see which platforms fail
 assertThat(mem.getTotalVirtual().getBytes(), greaterThan(0L));
 }
 }
@@ -113,7 +113,7 @@ public class InternalScriptedMetricTests extends InternalAggregationTestCase<Int
 */
 @Override
 protected ScriptService mockScriptService() {
-// mock script always retuns the size of the input aggs list as result
+// mock script always returns the size of the input aggs list as result
 @SuppressWarnings("unchecked")
 MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME,
 Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List<Object>) script.get("states")).size()),
@@ -170,7 +170,7 @@ public class InternalTopHitsTests extends InternalAggregationTestCase<InternalTo
 case STRING_VAL:
 return new BytesRef(randomAlphaOfLength(5));
 default:
-throw new UnsupportedOperationException("Unkown SortField.Type: " + type);
+throw new UnsupportedOperationException("Unknown SortField.Type: " + type);
 }
 }

@@ -159,7 +159,7 @@ public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder
 }

 /**
-* Test that missing values get transfered correctly to the SortField
+* Test that missing values get transferred correctly to the SortField
 */
 public void testBuildSortFieldMissingValue() throws IOException {
 QueryShardContext shardContextMock = createMockShardContext();
@@ -190,7 +190,7 @@ public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder
 }

 /**
-* Test that the sort builder order gets transfered correctly to the SortField
+* Test that the sort builder order gets transferred correctly to the SortField
 */
 public void testBuildSortFieldOrder() throws IOException {
 QueryShardContext shardContextMock = createMockShardContext();
@@ -214,7 +214,7 @@ public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder
 }

 /**
-* Test that the sort builder mode gets transfered correctly to the SortField
+* Test that the sort builder mode gets transferred correctly to the SortField
 */
 public void testMultiValueMode() throws IOException {
 QueryShardContext shardContextMock = createMockShardContext();
@@ -249,7 +249,7 @@ public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder
 comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource();
 assertEquals(MultiValueMode.MEDIAN, comparatorSource.sortMode());

-// sort mode should also be set by build() implicitely to MIN or MAX if not set explicitely on builder
+// sort mode should also be set by build() implicitly to MIN or MAX if not set explicitly on builder
 sortBuilder = new FieldSortBuilder("value");
 sortField = sortBuilder.build(shardContextMock).field;
 assertThat(sortField, instanceOf(SortedNumericSortField.class));
@@ -445,7 +445,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
 }

 /**
-* Test that the sort builder order gets transfered correctly to the SortField
+* Test that the sort builder order gets transferred correctly to the SortField
 */
 public void testBuildSortFieldOrder() throws IOException {
 QueryShardContext shardContextMock = createMockShardContext();
@@ -460,7 +460,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
 }

 /**
-* Test that the sort builder mode gets transfered correctly to the SortField
+* Test that the sort builder mode gets transferred correctly to the SortField
 */
 public void testMultiValueMode() throws IOException {
 QueryShardContext shardContextMock = createMockShardContext();
@@ -267,7 +267,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
 }

 /**
-* Test that the sort builder mode gets transfered correctly to the SortField
+* Test that the sort builder mode gets transferred correctly to the SortField
 */
 public void testMultiValueMode() throws IOException {
 QueryShardContext shardContextMock = createMockShardContext();
@@ -2,5 +2,5 @@
 {"field1": "value0"}
 {"index": {"_index": "test", "_type": "doc", "_id": 1}}
 {"field1": "value1"}
-{"index": {"_index": "test", "_type": "doc", "_id": 2, "_unkown": ["foo", "bar"]}}
+{"index": {"_index": "test", "_type": "doc", "_id": 2, "_unknown": ["foo", "bar"]}}
 {"field1": "value2"}
@@ -2,7 +2,7 @@
 "person":{
 "dynamic_templates":[
 {
-"tempalte_1":{
+"template_1":{
 "match":"multi*",
 "mapping":{
 "type":"{dynamic_type}",
@@ -43,7 +43,7 @@ public class TestThreadInfoPatternConverterTests extends ESTestCase {
 // Test threads get the test name
 assertEquals(getTestName(), threadInfo(Thread.currentThread().getName()));

-// Suite initalization gets "suite"
+// Suite initialization gets "suite"
 assertEquals("suite", suiteInfo);

 // And stuff that doesn't match anything gets wrapped in [] so we can see it
@@ -482,7 +482,7 @@ public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase
 " type: test_type\n" +
 "warnings:\n" +
 " - some test warning they are typically pretty long\n" +
-" - some other test warning somtimes they have [in] them"
+" - some other test warning sometimes they have [in] them"
 );

 DoSection doSection = DoSection.parse(parser);
@@ -496,7 +496,7 @@ public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase
 assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0));
 assertThat(doSection.getExpectedWarningHeaders(), equalTo(Arrays.asList(
 "some test warning they are typically pretty long",
-"some other test warning somtimes they have [in] them")));
+"some other test warning sometimes they have [in] them")));

 parser = createParser(YamlXContent.yamlXContent,
 "indices.get_field_mapping:\n" +
@@ -99,7 +99,7 @@ public class InternalTestClusterTests extends ESTestCase {

 /**
 * a set of settings that are expected to have different values betweem clusters, even they have been initialized with the same
-* base settins.
+* base settings.
 */
 static final Set<String> clusterUniqueSettings = new HashSet<>();

@@ -61,7 +61,7 @@ configuring the `http` attachment type, you must specify the request URL. The
 `reporting` attachment type is a special type to include PDF rendered dashboards
 from kibana. This type is consistently polling the kibana app if the dashboard
 rendering is done, preventing long running HTTP connections, that are potentially
-killed by firewalls or load balancers inbetween.
+killed by firewalls or load balancers in-between.

 [source,js]
 --------------------------------------------------
@@ -35,7 +35,7 @@ import java.util.List;
 * <p>
 * Optionally, each hop can contain a "guiding query" that further limits the set of documents considered.
 * In our weblog example above we might choose to constrain the second hop to only look at log records that
-* had a reponse code of 404.
+* had a response code of 404.
 * </p>
 * <p>
 * If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating
@@ -123,7 +123,7 @@ public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, Exte
 //private final Environment env;
 protected boolean transportClientMode;
 protected final Licensing licensing;
-// These should not be directly accessed as they cannot be overriden in tests. Please use the getters so they can be overridden.
+// These should not be directly accessed as they cannot be overridden in tests. Please use the getters so they can be overridden.
 private static final SetOnce<XPackLicenseState> licenseState = new SetOnce<>();
 private static final SetOnce<SSLService> sslService = new SetOnce<>();
 private static final SetOnce<LicenseService> licenseService = new SetOnce<>();
@@ -385,7 +385,7 @@ public class Detector implements ToXContentObject, Writeable {
 }

 /**
-* Excludes frequently-occuring metrics from the analysis;
+* Excludes frequently-occurring metrics from the analysis;
 * can apply to 'by' field, 'over' field, or both
 *
 * @return the value that the user set
@@ -106,7 +106,7 @@ public abstract class WatchRecord implements ToXContentObject {
 }
 if (executionResult.conditionResult().met()) {
 final Collection<ActionWrapperResult> values = executionResult.actionsResults().values();
-// acknowledged as state wins because the user had explicitely set this, where as throttled may happen due to execution
+// acknowledged as state wins because the user had explicitly set this, where as throttled may happen due to execution
 if (values.stream().anyMatch((r) -> r.action().status() == Action.Result.Status.ACKNOWLEDGED)) {
 return ExecutionState.ACKNOWLEDGED;
 } else if (values.stream().anyMatch((r) -> r.action().status() == Action.Result.Status.THROTTLED)) {
@@ -193,7 +193,7 @@ public class TransportGraphExploreAction extends HandledTransportAction<GraphExp
 // A single sample pool of docs is built at the root of the aggs tree.
 // For quality's sake it might have made more sense to sample top docs
 // for each of the terms from the previous hop (e.g. an initial query for "beatles"
-// may have seperate doc-sample pools for significant root terms "john", "paul", "yoko" etc)
+// may have separate doc-sample pools for significant root terms "john", "paul", "yoko" etc)
 // but I found this dramatically slowed down execution - each pool typically had different docs which
 // each had non-overlapping sets of terms that needed frequencies looking up for significant terms.
 // A common sample pool reduces the specialization that can be given to each root term but
@@ -11,7 +11,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.xpack.core.ml.MachineLearningField;

 /**
-* An extention to {@link ESSingleNodeTestCase} that adds node settings specifically needed for ML test cases.
+* An extension to {@link ESSingleNodeTestCase} that adds node settings specifically needed for ML test cases.
 */
 public abstract class MlSingleNodeTestCase extends ESSingleNodeTestCase {

@@ -184,7 +184,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {
 @TestLogging("org.elasticsearch.xpack.persistent:TRACE,org.elasticsearch.cluster.service:DEBUG,org.elasticsearch.xpack.ml.action:DEBUG")
 public void testDedicatedMlNode() throws Exception {
 internalCluster().ensureAtMostNumDataNodes(0);
-// start 2 non ml node that will never get a job allocated. (but ml apis are accessable from this node)
+// start 2 non ml node that will never get a job allocated. (but ml apis are accessible from this node)
 internalCluster().startNode(Settings.builder().put(MachineLearning.ML_ENABLED.getKey(), false));
 internalCluster().startNode(Settings.builder().put(MachineLearning.ML_ENABLED.getKey(), false));
 // start ml node
@@ -262,7 +262,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE
 updatePersistentTaskState(state,
 ActionListener.wrap(
 (task) -> {
-logger.debug("Succesfully updated state for rollup job [" + job.getConfig().getId() + "] to ["
+logger.debug("Successfully updated state for rollup job [" + job.getConfig().getId() + "] to ["
 + state.getIndexerState() + "][" + state.getPosition() + "]");
 listener.onResponse(new StartRollupJobAction.Response(true));
 },
@@ -308,7 +308,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE
 updatePersistentTaskState(state,
 ActionListener.wrap(
 (task) -> {
-logger.debug("Succesfully updated state for rollup job [" + job.getConfig().getId()
+logger.debug("Successfully updated state for rollup job [" + job.getConfig().getId()
 + "] to [" + state.getIndexerState() + "]");
 listener.onResponse(new StopRollupJobAction.Response(true));
 },
@@ -202,7 +202,7 @@ public class RollupJobTaskTests extends ESTestCase {
 } else if (c == 1) {
 assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
 } else {
-fail("Should not have updated persistent statuse > 2 times");
+fail("Should not have updated persistent statuses > 2 times");
 }
 listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1,
 new PersistentTasksCustomMetaData.Assignment("foo", "foo")));
@@ -688,7 +688,7 @@ public class RollupJobTaskTests extends ESTestCase {
 } else if (c == 2) {
 assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED));
 } else {
-fail("Should not have updated persistent statuse > 3 times");
+fail("Should not have updated persistent statuses > 3 times");
 }
 listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1,
 new PersistentTasksCustomMetaData.Assignment("foo", "foo")));
@@ -115,7 +115,7 @@ public class LdapSessionFactory extends SessionFactory {
 * Securely escapes the username and inserts it into the template using MessageFormat
 *
 * @param username username to insert into the DN template. Any commas, equals or plus will be escaped.
-* @return DN (distinquished name) build from the template.
+* @return DN (distinguished name) build from the template.
 */
 String buildDnFromTemplate(String username, String template) {
 //this value must be escaped to avoid manipulation of the template DN.
@@ -151,7 +151,7 @@ public interface UserRoleMapper {
 * {@link ExpressionModel} class can take a custom {@code Predicate} that tests whether the data in the model
 * matches the {@link FieldExpression.FieldValue value} in the expression.
 *
-* The string constructor parameter may or may not actaully parse as a DN - the "dn" field <em>should</em>
+* The string constructor parameter may or may not actually parse as a DN - the "dn" field <em>should</em>
 * always be a DN, however groups will be a DN if they're from an LDAP/AD realm, but often won't be for a SAML realm.
 *
 * Because the {@link FieldExpression.FieldValue} might be a pattern ({@link CharacterRunAutomaton automaton}),
@@ -53,7 +53,7 @@ public interface ServerTransportFilter {
 throws IOException;

 /**
-* The server trasnport filter that should be used in nodes as it ensures that an incoming
+* The server transport filter that should be used in nodes as it ensures that an incoming
 * request is properly authenticated and authorized
 */
 class NodeProfile implements ServerTransportFilter {
@@ -31,7 +31,7 @@ import static org.hamcrest.Matchers.not;
 /**
 * This test ensures, that the plugin template upgrader can add and remove
 * templates when started within security, as this requires certain
-* system priviliges
+* system privileges
 */
 @ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0)
 public class TemplateUpgraderTests extends SecurityIntegTestCase {
@@ -40,7 +40,7 @@ public class LdapMetaDataResolverTests extends ESTestCase {
 new Attribute("cn", "Clint Barton"),
 new Attribute("uid", "hawkeye"),
 new Attribute("email", "clint.barton@shield.gov"),
-new Attribute("memberOf", "cn=staff,ou=groups,dc=exmaple,dc=com", "cn=admin,ou=groups,dc=exmaple,dc=com")
+new Attribute("memberOf", "cn=staff,ou=groups,dc=example,dc=com", "cn=admin,ou=groups,dc=example,dc=com")
 );
 final Map<String, Object> map = resolve(attributes);
 assertThat(map.size(), equalTo(2));
@@ -1636,7 +1636,7 @@ public class SamlAuthenticatorTests extends SamlTestCase {
 /*
 Permutation 7 - Mangle the contents of the response to be
 <Response>
-<Extentions>
+<Extensions>
 <ForgedAssertion><?ForgedAssertion>
 <LegitimateAssertion>
 <LegitimateAssertionSignature></LegitimateAssertionSignature>

@@ -1645,16 +1645,16 @@ public class SamlAuthenticatorTests extends SamlTestCase {
 */
 final Element response = (Element) legitimateDocument.
 getElementsByTagNameNS(SAML20P_NS, "Response").item(0);
-final Element extentions = legitimateDocument.createElement("Extensions");
+final Element extensions = legitimateDocument.createElement("Extensions");
 final Element assertion = (Element) legitimateDocument.
 getElementsByTagNameNS(SAML20_NS, "Assertion").item(0);
-response.insertBefore(extentions, assertion);
+response.insertBefore(extensions, assertion);
 final Element forgedAssertion = (Element) assertion.cloneNode(true);
 forgedAssertion.setAttribute("ID", "_forged_assertion_id");
 final Element forgedSignature = (Element) forgedAssertion.
 getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0);
 forgedAssertion.removeChild(forgedSignature);
-extentions.appendChild(forgedAssertion);
+extensions.appendChild(forgedAssertion);
 final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement())));
 final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken));
 assertThat(exception.getMessage(), containsString("Failed to parse SAML"));

@@ -121,7 +121,7 @@ public class IpFilteringUpdateTests extends SecurityIntegTestCase {
 }
 }

-// issue #762, occured because in the above test we use HTTP and transport
+// issue #762, occurred because in the above test we use HTTP and transport
 public void testThatDisablingIpFilterWorksAsExpected() throws Exception {
 Settings settings = Settings.builder()
 .put("xpack.security.transport.filter.deny", "127.0.0.8")

@@ -23,7 +23,7 @@ role3:
 cluster: ALL
 indices: '*': ALL

-# invalid role indices privilegs
+# invalid role indices privileges
 role4:
 cluster: ALL
 indices:

@@ -43,7 +43,7 @@ import javax.sql.DataSource;
 * This class tries to cater to both audiences - use the legacy, Writer way if needed though strive to use the
 * proper typical approach, that of specifying intention and output (file) in the URL.
 *
-* For this reason the {@link System#out} and {@link System#err} are being refered in this class though are used only
+* For this reason the {@link System#out} and {@link System#err} are being referred in this class though are used only
 * when needed.
 */
 public final class Debug {

@@ -23,7 +23,7 @@ public class FunctionDefinition {
 private final List<String> aliases;
 private final Class<? extends Function> clazz;
 /**
-* Is this a datetime function comaptible with {@code EXTRACT}.
+* Is this a datetime function compatible with {@code EXTRACT}.
 */
 private final boolean datetime;
 private final Builder builder;

@@ -60,7 +60,7 @@ public class FunctionDefinition {
 }

 /**
-* Is this a datetime function comaptible with {@code EXTRACT}.
+* Is this a datetime function compatible with {@code EXTRACT}.
 */
 boolean datetime() {
 return datetime;

@@ -15,7 +15,7 @@ abstract class StringFunctionUtils {
 *
 * @param s the original String
 * @param start starting position for the substring within the original string. 0-based index position
-* @param length length in characters of the substracted substring
+* @param length length in characters of the subtracted substring
 * @return the resulting String
 */
 static String substring(String s, int start, int length) {

@@ -291,7 +291,7 @@ abstract class ExpressionBuilder extends IdentifierBuilder {
 }
 return new Neg(source(ctx.operator), value);
 default:
-throw new ParsingException(loc, "Unknown arithemtic {}", ctx.operator.getText());
+throw new ParsingException(loc, "Unknown arithmetic {}", ctx.operator.getText());
 }
 }

@@ -314,7 +314,7 @@ abstract class ExpressionBuilder extends IdentifierBuilder {
 case SqlBaseParser.MINUS:
 return new Sub(loc, left, right);
 default:
-throw new ParsingException(loc, "Unknown arithemtic {}", ctx.operator.getText());
+throw new ParsingException(loc, "Unknown arithmetic {}", ctx.operator.getText());
 }
 }

@@ -172,7 +172,7 @@ setup:
 "job_id":"datafeeds-crud-1",
 "indexes":["index-foo"],
 "types":["type-bar"],
-"query":{"match_all_mispelled":{}}
+"query":{"match_all_misspelled":{}}
 }

 ---

@@ -34,7 +34,7 @@ setup:
 # read the monitoring indices.
 - do:
 xpack.security.put_role:
-name: "unkown_agent_role"
+name: "unknown_agent_role"
 body: >
 {
 "cluster": ["monitor"],

@@ -51,7 +51,7 @@ setup:
 body: >
 {
 "password": "s3krit",
-"roles" : [ "unkown_agent_role" ]
+"roles" : [ "unknown_agent_role" ]
 }

 ---

@@ -70,7 +70,7 @@ teardown:
 ignore: 404
 - do:
 xpack.security.delete_role:
-name: "unkown_agent_role"
+name: "unknown_agent_role"
 ignore: 404

 ---

@@ -177,7 +177,7 @@ setup:
 - is_true: acknowledged

 ---
-"Test delete non-existant job":
+"Test delete non-existent job":

 - do:
 catch: /the task with id does_not_exist doesn't exist/

@@ -37,7 +37,7 @@ setup:
 }

 ---
-"Test start non-existant job":
+"Test start nonexistent job":

 - do:
 catch: /Task for Rollup Job \[does_not_exist\] not found/

@@ -37,7 +37,7 @@ setup:
 }

 ---
-"Test stop non-existant job":
+"Test stop nonexistent job":

 - do:
 catch: /Task for Rollup Job \[does_not_exist\] not found/

@@ -91,7 +91,7 @@ teardown:
 ignore: 404

 ---
-"Test shared index seperating user by using DLS":
+"Test shared index separating user by using DLS":
 - do:
 headers:
 Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk"

@@ -12,7 +12,7 @@ teardown:
 ignore: 404

 ---
-"Ensure that ack status is reset after unsuccesful execution":
+"Ensure that ack status is reset after unsuccessful execution":

 - do:
 xpack.watcher.put_watch:

@@ -47,7 +47,7 @@ public class IndexUpgradeIT extends IndexUpgradeIntegTestCase {

 public void testIndexUpgradeInfoLicense() throws Exception {
 // This test disables all licenses and generates a new one using dev private key
-// in non-snapshot builds we are using produciton public key for license verification
+// in non-snapshot builds we are using production public key for license verification
 // which makes this test to fail
 assumeTrue("License is only valid when tested against snapshot/test keys", Build.CURRENT.isSnapshot());
 assertAcked(client().admin().indices().prepareCreate("test").get());

@@ -143,7 +143,7 @@ final class WatcherIndexingListener extends AbstractComponent implements Indexin
 *
 * @param shardId The shard id object of the document being processed
 * @param index The index operation
-* @param ex The exception occured during indexing
+* @param ex The exception occurred during indexing
 */
 @Override
 public void postIndex(ShardId shardId, Engine.Index index, Exception ex) {

@@ -122,7 +122,7 @@ public class ExecutionService extends AbstractComponent {

 /**
 * Pause the execution of the watcher executor, and empty the state.
-* Pausing means, that no new watch executions will be done unless this pausing is explicitely unset.
+* Pausing means, that no new watch executions will be done unless this pausing is explicitly unset.
 * This is important when watcher is stopped, so that scheduled watches do not accidentally get executed.
 * This should not be used when we need to reload watcher based on some cluster state changes, then just calling
 * {@link #clearExecutionsAndQueue()} is the way to go

@@ -338,7 +338,7 @@ public class ExecutionService extends AbstractComponent {
 public void updateWatchStatus(Watch watch) throws IOException {
 // at the moment we store the status together with the watch,
 // so we just need to update the watch itself
-// we do not want to update the status.state field, as it might have been deactivated inbetween
+// we do not want to update the status.state field, as it might have been deactivated in-between
 Map<String, String> parameters = MapBuilder.<String, String>newMapBuilder()
 .put(Watch.INCLUDE_STATUS_KEY, "true")
 .put(WatchStatus.INCLUDE_STATE, "false")

@@ -117,7 +117,7 @@ public class SlackActionTests extends ESTestCase {
 hasError = true;
 break;
 case 1:
-when(response.status()).thenReturn(randomIntBetween(300, 600)); // error reponse
+when(response.status()).thenReturn(randomIntBetween(300, 600)); // error response
 messages.add(SentMessages.SentMessage.responded(randomAlphaOfLength(10), message, request, response));
 hasError = true;
 break;

@@ -522,7 +522,7 @@ public class HttpClientTests extends ESTestCase {
 });
 HttpRequest request = HttpRequest.builder("localhost", serverSocket.getLocalPort()).path("/").build();
 expectThrows(ClientProtocolException.class, () -> httpClient.execute(request));
-assertThat("A server side exception occured, but shouldn't", hasExceptionHappened.get(), is(nullValue()));
+assertThat("A server side exception occurred, but shouldn't", hasExceptionHappened.get(), is(nullValue()));
 } finally {
 terminate(executor);
 }

@@ -83,7 +83,7 @@ public class DeleteWatchTests extends AbstractWatcherIntegrationTestCase {
 // watch has been executed successfully
 String state = ObjectPath.eval("state", source);
 assertThat(state, is("executed"));
-// no exception occured
+// no exception occurred
 assertThat(source, not(hasKey("exception")));
 }
 }

@@ -105,7 +105,7 @@ public class ExecuteWatchTests extends AbstractWatcherIntegrationTestCase {
 }

 if (mode.force()) {
-// since we're forcing, lets ack the action, such that it'd suppoed to be throttled
+// since we're forcing, lets ack the action, such that it'd supposed to be throttled
 // but forcing will ignore the throttling

 // lets wait for the watch to be ackable

@@ -62,7 +62,7 @@ teardown:
 ignore: 404

 ---
-"Test shared index seperating user by using DLS role query with user's username":
+"Test shared index separating user by using DLS role query with user's username":
 - do:
 xpack.security.put_role:
 name: "small_companies_role"

@@ -130,7 +130,7 @@ teardown:
 - match: { hits.hits.0._source.user.username: john}

 ---
-"Test shared index seperating user by using DLS role query with user's metadata":
+"Test shared index separating user by using DLS role query with user's metadata":
 - do:
 xpack.security.put_role:
 name: "small_companies_role"

@@ -213,7 +213,7 @@ public class RestSqlSecurityIT extends SqlSecurityTestCase {
 /**
 * Test the hijacking a scroll fails. This test is only implemented for
 * REST because it is the only API where it is simple to hijack a scroll.
-* It should excercise the same code as the other APIs but if we were truly
+* It should exercise the same code as the other APIs but if we were truly
 * paranoid we'd hack together something to test the others as well.
 */
 public void testHijackScrollFails() throws Exception {

@@ -264,7 +264,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
 createAuditLogAsserter()
 .expectSqlCompositeAction("test_admin", "test")
 /* Scrolling doesn't have to access the index again, at least not through sql.
-* If we asserted query and scroll logs then we would see the scoll. */
+* If we asserted query and scroll logs then we would see the scroll. */
 .expect(true, SQL_ACTION_NAME, "test_admin", empty())
 .expect(true, SQL_ACTION_NAME, "test_admin", empty())
 .expectSqlCompositeAction("only_a", "test")