Merge branch 'master' into line
commit 9b2b4744f5

@@ -418,8 +418,7 @@ class ClusterFormationTasks {
                 // argument are wrapped in an ExecArgWrapper that escapes commas
                 args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
             } else {
-                executable 'sh'
-                args execArgs
+                commandLine execArgs
             }
         }
     }

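Review note on the Windows branch above: `cmd /C call` treats an unescaped comma as a parameter separator, which is why each argument gets wrapped. The real `EscapeCommaWrapper` is defined elsewhere in ClusterFormationTasks.groovy and is not shown in this hunk; the following is only a hedged sketch of the idea, and the committed escape sequence may differ:

[source,groovy]
--------------------------------------------------
// Hypothetical sketch -- the committed wrapper lives elsewhere in this
// file and its exact escaping may differ.
static class EscapeCommaWrapper {
    Object arg

    // Gradle resolves exec args lazily via toString(), so the escape is
    // applied only when the command line is actually built.
    String toString() {
        // '^' is cmd.exe's escape character
        return arg.toString().replace(',', '^,')
    }
}
--------------------------------------------------
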
@@ -335,7 +335,7 @@ final class BootstrapCheck {
             @Override
             public String errorMessage() {
                 return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
-                    "] to a majority of the number of master eligible nodes in your cluster.";
+                    "] to a majority of the number of master eligible nodes in your cluster";
             }

             @Override

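Aside for reviewers: the message refers to the standard quorum rule for this setting, majority = (master-eligible nodes / 2) + 1. With three master-eligible nodes, for example:

[source,yaml]
--------------------------------------------------
# elasticsearch.yml -- illustrative: 3 master-eligible nodes => majority of 2
discovery.zen.minimum_master_nodes: 2
--------------------------------------------------
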
@@ -48,9 +48,9 @@ public final class FingerprintAnalyzer extends Analyzer {
         final Tokenizer tokenizer = new StandardTokenizer();
         TokenStream stream = tokenizer;
         stream = new LowerCaseFilter(stream);
+        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
         stream = new StopFilter(stream, stopWords);
         stream = new FingerprintFilter(stream, maxOutputSize, separator);
-        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
         return new TokenStreamComponents(tokenizer, stream);
     }
 }

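The reorder above is behavioural, not cosmetic: ASCII folding now runs before stop-word removal and fingerprinting, so folded variants are sorted and deduplicated inside `FingerprintFilter` instead of being folded after the fingerprint is built. A minimal sketch of the difference for "gödel escher bach" with `preserveOriginal = true` (package locations below are the Lucene 6.x ones; several of these classes moved in later Lucene versions):

[source,java]
--------------------------------------------------
// Reviewer sketch, not part of the change.
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
import org.apache.lucene.analysis.miscellaneous.FingerprintFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.util.CharArraySet;

class FilterOrderSketch {
    // Old order (fold last): FingerprintFilter emits the single token
    // "bach escher gödel", and folding that whole token afterwards yields
    // two fingerprint variants: {"bach escher godel", "bach escher gödel"}.
    //
    // New order (fold first): folding emits "godel" and "gödel" as separate
    // terms, and FingerprintFilter then sorts and deduplicates them into
    // one token: {"bach escher godel gödel"}.
    static TokenStream newChain() {
        Tokenizer tokenizer = new StandardTokenizer();
        TokenStream stream = tokenizer;
        stream = new LowerCaseFilter(stream);
        stream = new ASCIIFoldingFilter(stream, true);           // fold early
        stream = new StopFilter(stream, CharArraySet.EMPTY_SET); // no stop words
        return new FingerprintFilter(stream, 255, ' ');
    }
}
--------------------------------------------------
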
@@ -43,12 +43,15 @@ public class FingerprintAnalyzerTests extends ESTokenStreamTestCase {
         Analyzer a = new FingerprintAnalyzer(CharArraySet.EMPTY_SET, ' ', 255, false);
         assertAnalyzesTo(a, "gödel escher bach",
             new String[]{"bach escher godel"});
+
+        assertAnalyzesTo(a, "gödel godel escher bach",
+            new String[]{"bach escher godel"});
     }

     public void testPreserveOriginal() throws Exception {
         Analyzer a = new FingerprintAnalyzer(CharArraySet.EMPTY_SET, ' ', 255, true);
         assertAnalyzesTo(a, "gödel escher bach",
-            new String[]{"bach escher godel", "bach escher gödel"});
+            new String[]{"bach escher godel gödel"});
     }

     public void testLimit() throws Exception {

@@ -65,4 +68,5 @@ public class FingerprintAnalyzerTests extends ESTokenStreamTestCase {
         assertAnalyzesTo(a, "b c a",
             new String[]{"a_b_c"});
     }
+
 }

@@ -17,11 +17,11 @@ It consists of:
 Tokenizer::
 * <<analysis-standard-tokenizer,Standard Tokenizer>>

-Token Filters::
-* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
-* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
-* <<analysis-fingerprint-tokenfilter>>
-* <<analysis-asciifolding-tokenfilter>>
+Token Filters (in order)::
+1. <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
+2. <<analysis-asciifolding-tokenfilter>>
+3. <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
+4. <<analysis-fingerprint-tokenfilter>>

 [float]
 === Example output

@@ -68,7 +68,6 @@ The `fingerprint` analyzer accepts the following parameters:

 A pre-defined stop words list like `_english_` or an array containing a
 list of stop words. Defaults to `_none_`.
-
 `stopwords_path`::

 The path to a file containing stop words.

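For reference, both parameters are supplied where a custom `fingerprint` analyzer is defined; a minimal sketch (index and analyzer names illustrative):

[source,js]
--------------------------------------------------
PUT my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_fingerprint": {
          "type": "fingerprint",
          "stopwords": "_english_"
        }
      }
    }
  }
}
--------------------------------------------------
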
@@ -174,7 +174,7 @@ POST _reindex
 }
 --------------------------------------------------
 // CONSOLE
-// TEST[s/^/PUT twitter\nPUT blog\n/]
+// TEST[s/^/PUT twitter\nPUT blog\nGET _cluster\/health?wait_for_status=yellow\n/]

 It's also possible to limit the number of processed documents by setting
 `size`. This will only copy a single document from `twitter` to

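A note on the `// TEST[...]` churn that repeats through the rest of this diff: the directive is a docs-build substitution applied when the snippet runs as a test, and `s/^/.../ ` prepends setup requests to it. Adding `GET _cluster\/health?wait_for_status=yellow` makes each test wait for the just-created indices to be ready, so the effective sequence for the snippet above becomes roughly:

[source,js]
--------------------------------------------------
PUT twitter
PUT blog
GET _cluster/health?wait_for_status=yellow
POST _reindex
{ ... }
--------------------------------------------------
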
@@ -299,7 +299,7 @@ POST _reindex
 }
 --------------------------------------------------
 // CONSOLE
-// TEST[s/^/PUT source\n/]
+// TEST[s/^/PUT source\nGET _cluster\/health?wait_for_status=yellow\n/]

 By default `_reindex` uses scroll batches of 100. You can change the
 batch size with the `size` field in the `source` element:

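The example this paragraph introduces is elided by the diff; in shape, a batch-size override looks something like this (index names assumed for illustration):

[source,js]
--------------------------------------------------
POST _reindex
{
  "source": {
    "index": "source",
    "size": 10
  },
  "dest": {
    "index": "dest"
  }
}
--------------------------------------------------
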
@@ -319,7 +319,7 @@ POST _reindex
 }
 --------------------------------------------------
 // CONSOLE
-// TEST[s/^/PUT source\n/]
+// TEST[s/^/PUT source\nGET _cluster\/health?wait_for_status=yellow\n/]

 Reindex can also use the <<ingest>> feature by specifying a
 `pipeline` like this:

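Again the snippet itself is elided here; the `pipeline` is set on the destination, roughly like this (pipeline name hypothetical):

[source,js]
--------------------------------------------------
POST _reindex
{
  "source": {
    "index": "source"
  },
  "dest": {
    "index": "dest",
    "pipeline": "some_ingest_pipeline"
  }
}
--------------------------------------------------
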
@@ -338,7 +338,7 @@ POST _reindex
 }
 --------------------------------------------------
 // CONSOLE
-// TEST[s/^/PUT source\n/]
+// TEST[s/^/PUT source\nGET _cluster\/health?wait_for_status=yellow\n/]

 [float]
 === URL Parameters

@@ -435,7 +435,7 @@ While Reindex is running you can fetch their status using the

 [source,js]
 --------------------------------------------------
-GET _tasks/?pretty&detailed=true&actions=*reindex
+GET _tasks?detailed=true&actions=*reindex
 --------------------------------------------------
 // CONSOLE

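(The `?pretty` and stray-`/` removals in this and the following hunks look cosmetic: snippets tagged `// CONSOLE` run in Console, which pretty-prints responses on its own, so the parameter presumably only added noise to the examples.)
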
@@ -534,7 +534,7 @@ create an index containing documents that look like this:

 [source,js]
 --------------------------------------------------
-POST test/test/1?refresh&pretty
+POST test/test/1?refresh
 {
   "text": "words words",
   "flag": "foo"

@@ -547,7 +547,7 @@ But you don't like the name `flag` and want to replace it with `tag`.

 [source,js]
 --------------------------------------------------
-POST _reindex?pretty
+POST _reindex
 {
   "source": {
     "index": "test"

@@ -567,7 +567,7 @@ Now you can get the new document:

 [source,js]
 --------------------------------------------------
-GET test2/test/1?pretty
+GET test2/test/1
 --------------------------------------------------
 // CONSOLE
 // TEST[continued]

@@ -134,7 +134,7 @@ types at once, just like the search API:
 POST twitter,blog/tweet,post/_update_by_query
 --------------------------------------------------
 // CONSOLE
-// TEST[s/^/PUT twitter\nPUT blog\n/]
+// TEST[s/^/PUT twitter\nPUT blog\nGET _cluster\/health?wait_for_status=yellow\n/]

 If you provide `routing` then the routing is copied to the scroll query,
 limiting the process to the shards that match that routing value:

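The example following this paragraph is elided by the diff; a routing-limited request has this shape (routing value illustrative):

[source,js]
--------------------------------------------------
POST twitter/_update_by_query?routing=1
--------------------------------------------------
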
@@ -266,7 +266,7 @@ While Update By Query is running you can fetch their status using the

 [source,js]
 --------------------------------------------------
-GET _tasks/?pretty&detailed=true&action=*byquery
+GET _tasks?detailed=true&action=*byquery
 --------------------------------------------------
 // CONSOLE

@@ -40,6 +40,10 @@ setup() {
     export_elasticsearch_paths
 }

+@test "[DEB] package depends on bash" {
+    dpkg -I elasticsearch-$(cat version).deb | grep "Depends:.*bash.*"
+}
+
 ##################################
 # Install DEB package
 ##################################

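Reviewer note: `dpkg -I` prints the package's control information, so the new assertion is just grepping the `Depends:` field. Illustratively (field contents made up):

[source,sh]
--------------------------------------------------
$ dpkg -I elasticsearch-$(cat version).deb
 ...
 Depends: bash, libc6, adduser
 ...
--------------------------------------------------
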
@@ -39,6 +39,10 @@ setup() {
    export_elasticsearch_paths
 }

+@test "[RPM] package depends on bash" {
+    rpm -qpR elasticsearch-$(cat version).rpm | grep '/bin/bash'
+}
+
 ##################################
 # Install RPM package
 ##################################

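Likewise `rpm -qpR` lists a package's requirements one per line, hence the grep for the literal interpreter path. Illustrative output (entries made up):

[source,sh]
--------------------------------------------------
$ rpm -qpR elasticsearch-$(cat version).rpm
/bin/bash
/bin/sh
...
--------------------------------------------------
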