Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-03-09 14:34:43 +00:00

Commit 2ac93098b2: Merge branch 'master' into feature/aggs_2_0

Conflicts:
	src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
	src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
@@ -283,3 +283,9 @@ To disable validation step (forbidden API or `// NOCOMMIT`) use

---------------------------------------------------------------------------
mvn test -Dvalidate.skip=true
---------------------------------------------------------------------------

You can also skip this by using the "dev" profile:

---------------------------------------------------------------------------
mvn test -Pdev
---------------------------------------------------------------------------
@@ -583,6 +583,20 @@ def ensure_checkout_is_clean(branchName):
    if 'is ahead' in s:
        raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, s))

# Checks all source files for //NORELEASE comments
def check_norelease(path='src'):
    pattern = re.compile(r'\bnorelease\b', re.IGNORECASE)
    for root, _, file_names in os.walk(path):
        for file_name in fnmatch.filter(file_names, '*.java'):
            full_path = os.path.join(root, file_name)
            line_number = 0
            with open(full_path, 'r', encoding='utf-8') as current_file:
                for line in current_file:
                    line_number = line_number + 1
                    if pattern.search(line):
                        raise RuntimeError('Found //norelease comment in %s line %s' % (full_path, line_number))

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release')
    parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),

@@ -626,6 +640,7 @@ if __name__ == '__main__':
    print(' JAVA_HOME is [%s]' % JAVA_HOME)
    print(' Running with maven command: [%s] ' % (MVN))
    if build:
        check_norelease(path='src')
        ensure_checkout_is_clean(src_branch)
        verify_lucene_version()
        release_version = find_release_version(src_branch)
@ -66,8 +66,6 @@ def index_documents(es, index_name, type, num_docs):
|
||||
es.indices.refresh(index=index_name)
|
||||
if rarely():
|
||||
es.indices.flush(index=index_name, force=frequently())
|
||||
if rarely():
|
||||
es.indices.optimize(index=index_name)
|
||||
logging.info('Flushing index')
|
||||
es.indices.flush(index=index_name)
|
||||
|
||||
@ -149,12 +147,15 @@ def generate_index(client, version):
|
||||
'type': 'string',
|
||||
'index_analyzer': 'standard'
|
||||
},
|
||||
'completion_with_index_analyzer': {
|
||||
'type': 'completion',
|
||||
'index_analyzer': 'standard'
|
||||
}
|
||||
}
|
||||
}
|
||||
# completion type was added in 0.90.3
|
||||
if not version in ['0.90.0.Beta1', '0.90.0.RC1', '0.90.0.RC2', '0.90.0', '0.90.1', '0.90.2']:
|
||||
mappings['analyzer_1']['properties']['completion_with_index_analyzer'] = {
|
||||
'type': 'completion',
|
||||
'index_analyzer': 'standard'
|
||||
}
|
||||
|
||||
mappings['analyzer_type2'] = {
|
||||
'index_analyzer': 'standard',
|
||||
'search_analyzer': 'keyword',
|
||||
@ -209,7 +210,7 @@ def generate_index(client, version):
|
||||
health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
|
||||
assert health['timed_out'] == False, 'cluster health timed out %s' % health
|
||||
|
||||
num_docs = random.randint(10, 100)
|
||||
num_docs = random.randint(2000, 3000)
|
||||
index_documents(client, 'test', 'doc', num_docs)
|
||||
logging.info('Running basic asserts on the data added')
|
||||
run_basic_asserts(client, 'test', 'doc', num_docs)
|
||||
|
@ -1,3 +1,19 @@
|
||||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
@defaultMessage Convert to URI
|
||||
java.net.URL#getPath()
|
||||
java.net.URL#getFile()
|
||||
|
@ -1,3 +1,19 @@
|
||||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
@defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by its name) which executor it is associated with
|
||||
|
||||
java.util.concurrent.Executors#newFixedThreadPool(int)
|
||||
|
@ -1,2 +1,18 @@
|
||||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
com.carrotsearch.randomizedtesting.RandomizedTest#globalTempDir() @ Use newTempDirPath() instead
|
||||
com.carrotsearch.randomizedtesting.annotations.Seed @ Don't commit hardcoded seeds
|
||||
|
@@ -16,7 +16,7 @@ AggregationBuilder aggregation =
        AggregationBuilders
                .dateHistogram("agg")
                .field("dateOfBirth")
-                .interval(DateHistogram.Interval.YEAR);
+                .interval(DateHistogramInterval.YEAR);
--------------------------------------------------

Or if you want to set an interval of 10 days:

@@ -27,7 +27,7 @@ AggregationBuilder aggregation =
        AggregationBuilders
                .dateHistogram("agg")
                .field("dateOfBirth")
-                .interval(DateHistogram.Interval.days(10));
+                .interval(DateHistogramInterval.days(10));
--------------------------------------------------

@@ -43,13 +43,13 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
[source,java]
--------------------------------------------------
// sr is here your SearchResponse object
-DateHistogram agg = sr.getAggregations().get("agg");
+Histogram agg = sr.getAggregations().get("agg");

// For each entry
-for (DateHistogram.Bucket entry : agg.getBuckets()) {
-    String key = entry.getKey();                    // Key
-    DateTime keyAsDate = entry.getKeyAsDate();      // Key as date
-    long docCount = entry.getDocCount();            // Doc count
+for (Histogram.Bucket entry : agg.getBuckets()) {
+    DateTime keyAsDate = (DateTime) entry.getKey(); // Key
+    String key = entry.getKeyAsString();            // Key as String
+    long docCount = entry.getDocCount();            // Doc count

    logger.info("key [{}], date [{}], doc_count [{}]", key, keyAsDate.getYear(), docCount);
}
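The new bucket API maps naturally onto the REST layer as well. As a point of comparison, here is a minimal sketch of the same yearly `date_histogram` issued with the official Python client; the `people` index name is an assumption made for the example, not something taken from the docs above.

```python
from elasticsearch import Elasticsearch  # official Python client, assumed installed

es = Elasticsearch()

# Yearly date_histogram on dateOfBirth, mirroring the Java builder above.
# "people" is an illustrative index name.
response = es.search(index="people", body={
    "size": 0,
    "aggs": {
        "agg": {
            "date_histogram": {
                "field": "dateOfBirth",
                "interval": "year"
            }
        }
    }
})

# Same iteration as the Java loop: key as string, key (epoch millis), doc count.
for bucket in response["aggregations"]["agg"]["buckets"]:
    print(bucket["key_as_string"], bucket["key"], bucket["doc_count"])
```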
@ -29,20 +29,20 @@ Import Aggregation definition classes:
|
||||
|
||||
[source,java]
|
||||
--------------------------------------------------
|
||||
import org.elasticsearch.search.aggregations.bucket.range.date.DateRange;
|
||||
import org.elasticsearch.search.aggregations.bucket.range.Range;
|
||||
--------------------------------------------------
|
||||
|
||||
[source,java]
|
||||
--------------------------------------------------
|
||||
// sr is here your SearchResponse object
|
||||
DateRange agg = sr.getAggregations().get("agg");
|
||||
Range agg = sr.getAggregations().get("agg");
|
||||
|
||||
// For each entry
|
||||
for (DateRange.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // Date range as key
|
||||
DateTime fromAsDate = entry.getFromAsDate(); // Date bucket from as a Date
|
||||
DateTime toAsDate = entry.getToAsDate(); // Date bucket to as a Date
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
for (Range.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // Date range as key
|
||||
DateTime fromAsDate = (DateTime) entry.getFrom(); // Date bucket from as a Date
|
||||
DateTime toAsDate = (DateTime) entry.getTo(); // Date bucket to as a Date
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
|
||||
logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, fromAsDate, toAsDate, docCount);
|
||||
}
|
||||
|
@ -30,20 +30,20 @@ Import Aggregation definition classes:
|
||||
|
||||
[source,java]
|
||||
--------------------------------------------------
|
||||
import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistance;
|
||||
import org.elasticsearch.search.aggregations.bucket.range.Range;
|
||||
--------------------------------------------------
|
||||
|
||||
[source,java]
|
||||
--------------------------------------------------
|
||||
// sr is here your SearchResponse object
|
||||
GeoDistance agg = sr.getAggregations().get("agg");
|
||||
Range agg = sr.getAggregations().get("agg");
|
||||
|
||||
// For each entry
|
||||
for (GeoDistance.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // key as String
|
||||
Number from = entry.getFrom(); // bucket from value
|
||||
Number to = entry.getTo(); // bucket to value
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
for (Range.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // key as String
|
||||
Number from = (Number) entry.getFrom(); // bucket from value
|
||||
Number to = (Number) entry.getTo(); // bucket to value
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
|
||||
logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, from, to, docCount);
|
||||
}
|
||||
|
@ -36,7 +36,8 @@ Histogram agg = sr.getAggregations().get("agg");
|
||||
|
||||
// For each entry
|
||||
for (Histogram.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // Key
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
Number key = (Number) entry.getKey(); // Key
|
||||
String keyAsString = entry.getKeyAsString(); // Key As String
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
@ -40,7 +40,7 @@ Import Aggregation definition classes:
|
||||
|
||||
[source,java]
|
||||
--------------------------------------------------
|
||||
import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;
|
||||
import org.elasticsearch.search.aggregations.bucket.range.Range;
|
||||
--------------------------------------------------
|
||||
|
||||
[source,java]
|
||||
@ -49,7 +49,7 @@ import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;
|
||||
IPv4Range agg = sr.getAggregations().get("agg");
|
||||
|
||||
// For each entry
|
||||
for (IPv4Range.Bucket entry : agg.getBuckets()) {
|
||||
for (Range.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // Ip range as key
|
||||
String fromAsString = entry.getFromAsString(); // Ip bucket from as a String
|
||||
String toAsString = entry.getToAsString(); // Ip bucket to as a String
|
||||
|
@ -38,10 +38,10 @@ Range agg = sr.getAggregations().get("agg");
|
||||
|
||||
// For each entry
|
||||
for (Range.Bucket entry : agg.getBuckets()) {
|
||||
String key = entry.getKey(); // Range as key
|
||||
Number from = entry.getFrom(); // Bucket from
|
||||
Number to = entry.getTo(); // Bucket to
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
String key = entry.getKey(); // Range as key
|
||||
Number from = (Number) entry.getFrom(); // Bucket from
|
||||
Number to = (Number) entry.getTo(); // Bucket to
|
||||
long docCount = entry.getDocCount(); // Doc count
|
||||
|
||||
logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, from, to, docCount);
|
||||
}
|
||||
|
@@ -7,9 +7,9 @@ information *spans nodes*.
[source,shell]
--------------------------------------------------
% curl 'localhost:9200/_cat/indices/twi*?v'
-health index    pri rep docs.count docs.deleted store.size pri.store.size
-green  twitter    5   1      11434            0       64mb           32mb
-green  twitter2   2   0       2030            0      5.8mb          5.8mb
+health status index    pri rep docs.count docs.deleted store.size pri.store.size
+green  open   twitter    5   1      11434            0       64mb           32mb
+green  open   twitter2   2   0       2030            0      5.8mb          5.8mb
--------------------------------------------------

We can tell quickly how many shards make up an index, the number of

@@ -33,8 +33,8 @@ Which indices are yellow?
[source,shell]
--------------------------------------------------
% curl localhost:9200/_cat/indices | grep ^yell
-yellow wiki     2 1  6401 1115 151.4mb 151.4mb
-yellow twitter  5 1 11434    0    32mb    32mb
+yellow open wiki     2 1  6401 1115 151.4mb 151.4mb
+yellow open twitter  5 1 11434    0    32mb    32mb
--------------------------------------------------

What's my largest index by disk usage not including replicas?

@@ -42,9 +42,9 @@ What's my largest index by disk usage not including replicas?
[source,shell]
--------------------------------------------------
% curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk7
-green wiki     2 0  6401 1115 158843725 158843725
-green twitter  5 1 11434    0  67155614  33577857
-green twitter2 2 0  2030    0   6125085   6125085
+green open wiki     2 0  6401 1115 158843725 158843725
+green open twitter  5 1 11434    0  67155614  33577857
+green open twitter2 2 0  2030    0   6125085   6125085
--------------------------------------------------
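The same question can be answered without the shell pipeline. A small sketch using only the Python standard library; the host, port, and the choice of sorting on the last column (primary store size) are assumptions for the example:

```python
import urllib.request

# Ask _cat/indices for exact byte counts so the sizes sort numerically,
# mirroring `curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk7`.
with urllib.request.urlopen("http://localhost:9200/_cat/indices?bytes=b") as resp:
    rows = [line.split() for line in resp.read().decode("utf-8").splitlines() if line.strip()]

# Sort by the last column (pri.store.size) so replica copies are not counted twice.
for row in sorted(rows, key=lambda r: int(r[-1]), reverse=True):
    print(" ".join(row))
```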
How many merge operations have the shards for the `wiki` completed?
(Three binary image files changed, not shown; before/after sizes: 1.5 KiB -> 1.7 KiB, 2.3 KiB -> 2.2 KiB, 2.0 KiB -> 2.2 KiB.)
@@ -45,7 +45,7 @@ settings API.
[[fielddata-circuit-breaker]]
==== Field data circuit breaker
The field data circuit breaker allows Elasticsearch to estimate the amount of
-memory a field will required to be loaded into memory. It can then prevent the
+memory a field will require to be loaded into memory. It can then prevent the
field data loading by raising an exception. By default the limit is configured
to 60% of the maximum JVM heap. It can be configured with the following
parameters:
@ -65,6 +65,9 @@ coming[1.5.0, this syntax was change to fix inconsistencies with other API]
|
||||
},
|
||||
"translog" : {
|
||||
"recovered" : 0,
|
||||
"total" : 0,
|
||||
"percent" : "100.0%",
|
||||
"total_on_start" : 0,
|
||||
"total_time" : "0s",
|
||||
"total_time_in_millis" : 0
|
||||
},
|
||||
|
@ -64,9 +64,6 @@ settings API:
|
||||
`index.index_concurrency`::
|
||||
experimental[] Defaults to `8`.
|
||||
|
||||
`index.fail_on_merge_failure`::
|
||||
experimental[] Default to `true`.
|
||||
|
||||
`index.translog.flush_threshold_ops`::
|
||||
When to flush based on operations.
|
||||
|
||||
|
@@ -53,7 +53,7 @@ GET /_search
}
-----------------------------------

-Save the contents of the script as a file called `config/script/my_script.groovy`
+Save the contents of the script as a file called `config/scripts/my_script.groovy`
on every data node in the cluster:

[source,js]
@ -31,18 +31,19 @@ Response:
|
||||
"rings" : {
|
||||
"buckets": [
|
||||
{
|
||||
"unit": "km",
|
||||
"key": "*-100.0",
|
||||
"from": 0,
|
||||
"to": 100.0,
|
||||
"doc_count": 3
|
||||
},
|
||||
{
|
||||
"unit": "km",
|
||||
"key": "100.0-300.0",
|
||||
"from": 100.0,
|
||||
"to": 300.0,
|
||||
"doc_count": 1
|
||||
},
|
||||
{
|
||||
"unit": "km",
|
||||
"key": "300.0-*",
|
||||
"from": 300.0,
|
||||
"doc_count": 7
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ Response:
|
||||
...
|
||||
|
||||
"aggregations": {
|
||||
"ip_ranges":
|
||||
"ip_ranges": {
|
||||
"buckets" : [
|
||||
{
|
||||
"to": 167772165,
|
||||
|
@ -149,7 +149,6 @@ Now we have anomaly detection for each of the police forces using a single reque
|
||||
We can use other forms of top-level aggregations to segment our data, for example segmenting by geographic
|
||||
area to identify unusual hot-spots of a particular crime type:
|
||||
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
@@ -345,9 +344,38 @@ Roughly, `mutual_information` prefers high frequent terms even if they occur als

It is hard to say which one of the different heuristics will be the best choice as it depends on what the significant terms are used for (see for example [Yang and Pedersen, "A Comparative Study on Feature Selection in Text Categorization", 1997](http://courses.ischool.berkeley.edu/i256/f06/papers/yang97comparative.pdf) for a study on using significant terms for feature selection for text classification).

If none of the above measures suits your use case, then another option is to implement a custom significance measure:

===== scripted
coming[1.5.0]

Customized scores can be implemented via a script:

[source,js]
--------------------------------------------------

    "script_heuristic": {
              "script": "_subset_freq/(_superset_freq - _subset_freq + 1)"
    }
--------------------------------------------------

Scripts can be inline (as in the example above), indexed or stored on disk. For details on the options, see <<modules-scripting, script documentation>>.
Parameters need to be set as follows:

[horizontal]
`script`:: Inline script, name of script file or name of indexed script. Mandatory.
`script_type`:: One of "inline" (default), "indexed" or "file".
`lang`:: Script language (default "groovy")
`params`:: Script parameters (default empty).

Available parameters in the script are:

[horizontal]
`_subset_freq`:: Number of documents the term appears in, in the subset.
`_superset_freq`:: Number of documents the term appears in, in the superset.
`_subset_size`:: Number of documents in the subset.
`_superset_size`:: Number of documents in the superset.
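Because the heuristic above is just an arithmetic expression over these four counts, its behaviour is easy to check outside Elasticsearch. A plain Python rendering of the same formula (the sample numbers are made up):

```python
def script_heuristic(subset_freq, superset_freq, subset_size, superset_size):
    # Mirrors "_subset_freq / (_superset_freq - _subset_freq + 1)": a term scores
    # higher the more of its superset occurrences fall inside the subset; the +1
    # keeps the denominator positive when every occurrence is in the subset.
    # _subset_size and _superset_size are exposed to scripts but unused by this
    # particular expression.
    return subset_freq / (superset_freq - subset_freq + 1)

# A term seen in 50 subset documents and 60 superset documents overall:
print(script_heuristic(50, 60, subset_size=1000, superset_size=100000))  # ~4.55
```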
===== Size & Shard Size
The `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By
@@ -28,7 +28,7 @@ Let's look at a range of percentiles representing load time:
    "aggs" : {
        "load_time_outlier" : {
            "percentile_ranks" : {
-                "field" : "load_time" <1>
+                "field" : "load_time", <1>
                "values" : [15, 30]
            }
        }
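Putting the corrected snippet into a full request, sketched with the official Python client; the `website` index name and the sample values are assumptions for the example:

```python
from elasticsearch import Elasticsearch  # official Python client, assumed installed

es = Elasticsearch()

# percentile_ranks answers "what percentage of observed load_time values fall
# at or below 15 and 30?", the inverse question of the percentiles aggregation.
response = es.search(index="website", body={
    "size": 0,
    "aggs": {
        "load_time_outlier": {
            "percentile_ranks": {
                "field": "load_time",
                "values": [15, 30]
            }
        }
    }
})

print(response["aggregations"]["load_time_outlier"]["values"])
```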
@@ -152,6 +152,28 @@ An important default is that the `_source` returned in hits inside `inner_hits`
So in the above example only the comment part is returned per nested hit and not the entire source of the top level
document that contained the comment.

[[hierarchical-nested-inner-hits]]
==== Hierarchical levels of nested object fields and inner hits.

If a mapping has multiple levels of hierarchical nested object fields, each level can be accessed via a dot notated path.
For example, if there is a `comments` nested field that contains a `votes` nested field, and the votes should be returned
directly with the root hits, then the following path can be defined:

[source,js]
--------------------------------------------------
{
    "query" : {
        "nested" : {
            "path" : "comments.votes",
            "query" : { ... },
            "inner_hits" : {}
        }
    }
}
--------------------------------------------------

This indirect referencing is only supported for nested inner hits.
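A sketch of that request issued with the official Python client; the `my_index` name and the `match_all` stand-in for the elided query are illustrative:

```python
from elasticsearch import Elasticsearch  # official Python client, assumed installed

es = Elasticsearch()

# inner_hits on the second-level nested field, addressed by its dotted path.
# "my_index" and the match_all query are placeholders for the "..." above.
response = es.search(index="my_index", body={
    "query": {
        "nested": {
            "path": "comments.votes",
            "query": {"match_all": {}},
            "inner_hits": {}
        }
    }
})

# Each root hit carries the matching votes under inner_hits, keyed by the path.
for hit in response["hits"]["hits"]:
    votes = hit["inner_hits"]["comments.votes"]["hits"]["hits"]
    print(hit["_id"], len(votes))
```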
[[parent-child-inner-hits]]
==== Parent/child inner hits
@ -42,6 +42,9 @@
|
||||
\d+\.\d+% \s+ # bytes_percent
|
||||
\d+ \s+ # total_files
|
||||
\d+ \s+ # total_bytes
|
||||
\d+ \s+ # translog
|
||||
-?\d+\.\d+% \s+ # translog_percent
|
||||
-?\d+ \s+ # total_translog
|
||||
\n
|
||||
)+
|
||||
$/
|
||||
|
@ -53,3 +53,26 @@
|
||||
- match:
|
||||
$body: |
|
||||
/^(index2 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){5}$/
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: index3
|
||||
body:
|
||||
settings:
|
||||
number_of_shards: "1"
|
||||
number_of_replicas: "1"
|
||||
shadow_replicas: true
|
||||
shared_filesystem: false
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: yellow
|
||||
wait_for_relocating_shards: 0
|
||||
|
||||
- do:
|
||||
cat.shards:
|
||||
index: index3
|
||||
v: false
|
||||
- match:
|
||||
$body: |
|
||||
/^(index3 \s+ \d \s+ (p|s) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){2}$/
|
||||
|
||||
|
@ -33,6 +33,8 @@
|
||||
- gte: { test_1.shards.0.index.size.recovered_in_bytes: 0 }
|
||||
- match: { test_1.shards.0.index.size.percent: /^\d+\.\d\%$/ }
|
||||
- gte: { test_1.shards.0.translog.recovered: 0 }
|
||||
- gte: { test_1.shards.0.translog.total: -1 }
|
||||
- gte: { test_1.shards.0.translog.total_on_start: 0 }
|
||||
- gte: { test_1.shards.0.translog.total_time_in_millis: 0 }
|
||||
- gte: { test_1.shards.0.start.check_index_time_in_millis: 0 }
|
||||
- gte: { test_1.shards.0.start.total_time_in_millis: 0 }
|
||||
|
@ -93,4 +93,9 @@ public class Build {
|
||||
out.writeString(build.hashShort());
|
||||
out.writeString(build.timestamp());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[" + hash + "][" + timestamp + "]";
|
||||
}
|
||||
}
|
||||
|
@ -226,9 +226,11 @@ public class Version {
|
||||
public static final int V_1_4_4_ID = 1040499;
|
||||
public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
|
||||
public static final int V_1_4_5_ID = 1040599;
|
||||
public static final Version V_1_4_5 = new Version(V_1_4_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3);
|
||||
public static final Version V_1_4_5 = new Version(V_1_4_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3); // TODO 1.4.5 -> 1.6 is lucene 4.10.4 we need the constant here
|
||||
public static final int V_1_5_0_ID = 1050099;
|
||||
public static final Version V_1_5_0 = new Version(V_1_5_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3);
|
||||
public static final int V_1_6_0_ID = 1060099;
|
||||
public static final Version V_1_6_0 = new Version(V_1_6_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3);
|
||||
public static final int V_2_0_0_ID = 2000099;
|
||||
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_1_0);
|
||||
|
||||
@ -246,6 +248,8 @@ public class Version {
|
||||
switch (id) {
|
||||
case V_2_0_0_ID:
|
||||
return V_2_0_0;
|
||||
case V_1_6_0_ID:
|
||||
return V_1_6_0;
|
||||
case V_1_5_0_ID:
|
||||
return V_1_5_0;
|
||||
case V_1_4_5_ID:
|
||||
|
@ -233,7 +233,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui
|
||||
|
||||
/**
|
||||
* Sets index settings that should be added or replaced during restore
|
||||
|
||||
* @param settings index settings
|
||||
* @return this builder
|
||||
*/
|
||||
|
@ -24,7 +24,12 @@ import org.apache.lucene.search.ScoreDoc;
|
||||
import org.elasticsearch.ElasticsearchIllegalStateException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||
import org.elasticsearch.action.search.*;
|
||||
import org.elasticsearch.action.search.ReduceSearchPhaseException;
|
||||
import org.elasticsearch.action.search.SearchAction;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.TransportAction;
|
||||
import org.elasticsearch.action.support.TransportActions;
|
||||
@ -213,7 +218,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
|
||||
}
|
||||
if (successfulOps.get() == 0) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("All shards failed for phase: [{}]", firstPhaseName(), t);
|
||||
logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
|
||||
}
|
||||
// no successful ops, raise an exception
|
||||
raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", buildShardFailures()));
|
||||
|
@ -386,7 +386,7 @@ public abstract class TransportShardReplicationOperationAction<Request extends S
|
||||
}
|
||||
if (!shard.active() || !observer.observedState().nodes().nodeExists(shard.currentNodeId())) {
|
||||
logger.trace("primary shard [{}] is not yet active or we do not know the node it is assigned to [{}], scheduling a retry.", shard.shardId(), shard.currentNodeId());
|
||||
retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned is a known node.");
|
||||
retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node.");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -66,15 +66,6 @@ public class PageCacheRecycler extends AbstractComponent {
|
||||
}
|
||||
}
|
||||
|
||||
// return the maximum number of pages that may be cached depending on
|
||||
// - limit: the total amount of memory available
|
||||
// - pageSize: the size of a single page
|
||||
// - weight: the weight for this data type
|
||||
// - totalWeight: the sum of all weights
|
||||
private static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
|
||||
return (int) (weight / totalWeight * limit / pageSize);
|
||||
}
|
||||
|
||||
@Inject
|
||||
public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
|
||||
super(settings);
|
||||
@ -103,8 +94,10 @@ public class PageCacheRecycler extends AbstractComponent {
|
||||
final double objectsWeight = settings.getAsDouble(WEIGHT + ".objects", 0.1d);
|
||||
|
||||
final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
|
||||
final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / BigArrays.PAGE_SIZE_IN_BYTES);
|
||||
|
||||
bytePage = build(type, maxCount(limit, BigArrays.BYTE_PAGE_SIZE, bytesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<byte[]>() {
|
||||
final int maxBytePageCount = (int) (bytesWeight * maxPageCount / totalWeight);
|
||||
bytePage = build(type, maxBytePageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<byte[]>() {
|
||||
@Override
|
||||
public byte[] newInstance(int sizing) {
|
||||
return new byte[BigArrays.BYTE_PAGE_SIZE];
|
||||
@ -114,7 +107,9 @@ public class PageCacheRecycler extends AbstractComponent {
|
||||
// nothing to do
|
||||
}
|
||||
});
|
||||
intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<int[]>() {
|
||||
|
||||
final int maxIntPageCount = (int) (intsWeight * maxPageCount / totalWeight);
|
||||
intPage = build(type, maxIntPageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<int[]>() {
|
||||
@Override
|
||||
public int[] newInstance(int sizing) {
|
||||
return new int[BigArrays.INT_PAGE_SIZE];
|
||||
@ -124,17 +119,21 @@ public class PageCacheRecycler extends AbstractComponent {
|
||||
// nothing to do
|
||||
}
|
||||
});
|
||||
longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<long[]>() {
|
||||
|
||||
final int maxLongPageCount = (int) (longsWeight * maxPageCount / totalWeight);
|
||||
longPage = build(type, maxLongPageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<long[]>() {
|
||||
@Override
|
||||
public long[] newInstance(int sizing) {
|
||||
return new long[BigArrays.LONG_PAGE_SIZE];
|
||||
}
|
||||
@Override
|
||||
public void recycle(long[] value) {
|
||||
// nothing to do
|
||||
// nothing to do
|
||||
}
|
||||
});
|
||||
objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<Object[]>() {
|
||||
|
||||
final int maxObjectPageCount = (int) (objectsWeight * maxPageCount / totalWeight);
|
||||
objectPage = build(type, maxObjectPageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<Object[]>() {
|
||||
@Override
|
||||
public Object[] newInstance(int sizing) {
|
||||
return new Object[BigArrays.OBJECT_PAGE_SIZE];
|
||||
@ -144,6 +143,8 @@ public class PageCacheRecycler extends AbstractComponent {
|
||||
Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array
|
||||
}
|
||||
});
|
||||
|
||||
assert BigArrays.PAGE_SIZE_IN_BYTES * (maxBytePageCount + maxIntPageCount + maxLongPageCount + maxObjectPageCount) <= limit;
|
||||
}
|
||||
|
||||
public Recycler.V<byte[]> bytePage(boolean clear) {
|
||||
|
@ -333,7 +333,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
}
|
||||
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
|
||||
boolean changed = initialize(routingNodes, unassigned);
|
||||
if (!changed) {
|
||||
if (!changed && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
|
||||
NodeSorter sorter = newNodeSorter();
|
||||
if (nodes.size() > 1) { /* skip if we only have one node */
|
||||
for (String index : buildWeightOrderedIndidces(Operation.BALANCE, sorter)) {
|
||||
|
@ -80,4 +80,13 @@ public abstract class AllocationDecider extends AbstractComponent {
|
||||
public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
|
||||
return Decision.ALWAYS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@link Decision} whether the cluster can execute
|
||||
* re-balanced operations at all.
|
||||
* {@link Decision#ALWAYS}.
|
||||
*/
|
||||
public Decision canRebalance(RoutingAllocation allocation) {
|
||||
return Decision.ALWAYS;
|
||||
}
|
||||
}
|
||||
|
@ -157,4 +157,23 @@ public class AllocationDeciders extends AllocationDecider {
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Decision canRebalance(RoutingAllocation allocation) {
|
||||
Decision.Multi ret = new Decision.Multi();
|
||||
for (AllocationDecider allocationDecider : allocations) {
|
||||
Decision decision = allocationDecider.canRebalance(allocation);
|
||||
// short track if a NO is returned.
|
||||
if (decision == Decision.NO) {
|
||||
if (!allocation.debugDecision()) {
|
||||
return decision;
|
||||
} else {
|
||||
ret.add(decision);
|
||||
}
|
||||
} else if (decision != Decision.ALWAYS) {
|
||||
ret.add(decision);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -135,6 +135,11 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
|
||||
|
||||
@Override
|
||||
public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
|
||||
return canRebalance(allocation);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Decision canRebalance(RoutingAllocation allocation) {
|
||||
if (type == ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE) {
|
||||
// check if there are unassigned primaries.
|
||||
if ( allocation.routingNodes().hasUnassignedPrimaries() ) {
|
||||
|
@ -205,6 +205,44 @@ public interface Validator {
|
||||
}
|
||||
};
|
||||
|
||||
public static final Validator PERCENTAGE = new Validator() {
|
||||
@Override
|
||||
public String validate(String setting, String value) {
|
||||
try {
|
||||
if (value == null) {
|
||||
return "the value of " + setting + " can not be null";
|
||||
}
|
||||
if (!value.endsWith("%")) {
|
||||
return "the value [" + value + "] for " + setting + " must end with %";
|
||||
}
|
||||
final double asDouble = Double.parseDouble(value.substring(0, value.length() - 1));
|
||||
if (asDouble < 0.0 || asDouble > 100.0) {
|
||||
return "the value [" + value + "] for " + setting + " must be a percentage between 0% and 100%";
|
||||
}
|
||||
} catch (NumberFormatException ex) {
|
||||
return ex.getMessage();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
public static final Validator BYTES_SIZE_OR_PERCENTAGE = new Validator() {
|
||||
@Override
|
||||
public String validate(String setting, String value) {
|
||||
String byteSize = BYTES_SIZE.validate(setting, value);
|
||||
if (byteSize != null) {
|
||||
String percentage = PERCENTAGE.validate(setting, value);
|
||||
if (percentage == null) {
|
||||
return null;
|
||||
}
|
||||
return percentage + " or be a valid bytes size value, like [16mb]";
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
public static final Validator MEMORY_SIZE = new Validator() {
|
||||
@Override
|
||||
public String validate(String setting, String value) {
|
||||
|
@ -21,6 +21,7 @@ package org.elasticsearch.common.http.client;
|
||||
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ElasticsearchTimeoutException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
@ -277,6 +278,9 @@ public class HttpDownloadHelper {
|
||||
((HttpURLConnection) connection).setUseCaches(true);
|
||||
((HttpURLConnection) connection).setConnectTimeout(5000);
|
||||
}
|
||||
connection.setRequestProperty("ES-Version", Version.CURRENT.toString());
|
||||
connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager");
|
||||
|
||||
// connect to the remote site (may take some time)
|
||||
connection.connect();
|
||||
|
||||
|
@ -177,7 +177,7 @@ public class Lucene {
|
||||
}
|
||||
}
|
||||
final CommitPoint cp = new CommitPoint(si, directory);
|
||||
try (IndexWriter _ = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
|
||||
try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
|
||||
.setIndexCommit(cp)
|
||||
.setCommitOnClose(false)
|
||||
.setMergePolicy(NoMergePolicy.INSTANCE)
|
||||
@ -203,7 +203,7 @@ public class Lucene {
|
||||
}
|
||||
}
|
||||
}
|
||||
try (IndexWriter _ = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
|
||||
try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
|
||||
.setMergePolicy(NoMergePolicy.INSTANCE) // no merges
|
||||
.setCommitOnClose(false) // no commits
|
||||
.setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
|
||||
|
@ -125,7 +125,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
||||
long timeLocal = utcMillis;
|
||||
timeLocal = timeZone.convertUTCToLocal(utcMillis);
|
||||
long rounded = field.roundFloor(timeLocal);
|
||||
return timeZone.convertLocalToUTC(rounded, true, utcMillis);
|
||||
return timeZone.convertLocalToUTC(rounded, false, utcMillis);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -139,7 +139,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
||||
long timeLocal = time;
|
||||
timeLocal = timeZone.convertUTCToLocal(time);
|
||||
long nextInLocalTime = durationField.add(timeLocal, 1);
|
||||
return timeZone.convertLocalToUTC(nextInLocalTime, true);
|
||||
return timeZone.convertLocalToUTC(nextInLocalTime, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -184,7 +184,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
||||
long timeLocal = utcMillis;
|
||||
timeLocal = timeZone.convertUTCToLocal(utcMillis);
|
||||
long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval);
|
||||
return timeZone.convertLocalToUTC(rounded, true);
|
||||
return timeZone.convertLocalToUTC(rounded, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -198,7 +198,7 @@ public abstract class TimeZoneRounding extends Rounding {
|
||||
long timeLocal = time;
|
||||
timeLocal = timeZone.convertUTCToLocal(time);
|
||||
long next = timeLocal + interval;
|
||||
return timeZone.convertLocalToUTC(next, true);
|
||||
return timeZone.convertLocalToUTC(next, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -268,6 +268,9 @@ public class TimeValue implements Serializable, Streamable {
|
||||
return timeValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* serialization converts TimeValue internally to NANOSECONDS
|
||||
*/
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
duration = in.readLong();
|
||||
@ -285,17 +288,12 @@ public class TimeValue implements Serializable, Streamable {
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
TimeValue timeValue = (TimeValue) o;
|
||||
|
||||
if (duration != timeValue.duration) return false;
|
||||
if (timeUnit != timeValue.timeUnit) return false;
|
||||
|
||||
return true;
|
||||
return timeUnit.toNanos(duration) == timeValue.timeUnit.toNanos(timeValue.duration);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = (int) (duration ^ (duration >>> 32));
|
||||
result = 31 * result + (timeUnit != null ? timeUnit.hashCode() : 0);
|
||||
return result;
|
||||
long normalized = timeUnit.toNanos(duration);
|
||||
return (int) (normalized ^ (normalized >>> 32));
|
||||
}
|
||||
}
|
||||
|
@ -744,11 +744,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
assert newClusterState.nodes().masterNode() != null : "received a cluster state without a master";
|
||||
assert !newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block";
|
||||
|
||||
ClusterState currentState = clusterService.state();
|
||||
if (shouldIgnoreNewClusterState(logger, currentState, newClusterState)) {
|
||||
return;
|
||||
}
|
||||
|
||||
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + newClusterState.nodes().masterNode() + "])", Priority.URGENT, new ProcessedClusterStateNonMasterUpdateTask() {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
@ -766,7 +761,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
if (updatedState == null) {
|
||||
updatedState = currentState;
|
||||
}
|
||||
if (shouldIgnoreNewClusterState(logger, currentState, updatedState)) {
|
||||
if (shouldIgnoreOrRejectNewClusterState(logger, currentState, updatedState)) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
@ -876,16 +871,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
|
||||
/**
|
||||
* In the case we follow an elected master the new cluster state needs to have the same elected master and
|
||||
* the new cluster state version needs to be equal or higher than our cluster state version. If either conditions
|
||||
* are true then the cluster state is dated and we should ignore it.
|
||||
* the new cluster state version needs to be equal or higher than our cluster state version.
|
||||
* If the first condition fails we reject the cluster state and throw an error.
|
||||
* If the second condition fails we ignore the cluster state.
|
||||
*/
|
||||
static boolean shouldIgnoreNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) {
|
||||
static boolean shouldIgnoreOrRejectNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) {
|
||||
if (currentState.nodes().masterNodeId() == null) {
|
||||
return false;
|
||||
}
|
||||
if (!currentState.nodes().masterNodeId().equals(newClusterState.nodes().masterNodeId())) {
|
||||
logger.warn("received a cluster state from a different master then the current one, ignoring (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode());
|
||||
return true;
|
||||
logger.warn("received a cluster state from a different master then the current one, rejecting (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode());
|
||||
throw new ElasticsearchIllegalStateException("cluster state from a different master then the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")");
|
||||
} else if (newClusterState.version() < currentState.version()) {
|
||||
// if the new state has a smaller version, and it has the same master node, then no need to process it
|
||||
logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version());
|
||||
|
@ -21,14 +21,15 @@ package org.elasticsearch.discovery.zen.publish;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.compress.Compressor;
|
||||
import org.elasticsearch.common.compress.CompressorFactory;
|
||||
import org.elasticsearch.common.io.stream.*;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
|
||||
@ -190,25 +191,34 @@ public class PublishClusterStateAction extends AbstractComponent {
|
||||
ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode());
|
||||
clusterState.status(ClusterState.ClusterStateStatus.RECEIVED);
|
||||
logger.debug("received cluster state version {}", clusterState.version());
|
||||
listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() {
|
||||
@Override
|
||||
public void onNewClusterStateProcessed() {
|
||||
try {
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
} catch (Throwable e) {
|
||||
logger.debug("failed to send response on cluster state processed", e);
|
||||
try {
|
||||
listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() {
|
||||
@Override
|
||||
public void onNewClusterStateProcessed() {
|
||||
try {
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
} catch (Throwable e) {
|
||||
logger.debug("failed to send response on cluster state processed", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onNewClusterStateFailed(Throwable t) {
|
||||
try {
|
||||
channel.sendResponse(t);
|
||||
} catch (Throwable e) {
|
||||
logger.debug("failed to send response on cluster state processed", e);
|
||||
@Override
|
||||
public void onNewClusterStateFailed(Throwable t) {
|
||||
try {
|
||||
channel.sendResponse(t);
|
||||
} catch (Throwable e) {
|
||||
logger.debug("failed to send response on cluster state processed", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
logger.warn("unexpected error while processing cluster state version [{}]", e, clusterState.version());
|
||||
try {
|
||||
channel.sendResponse(e);
|
||||
} catch (Throwable e1) {
|
||||
logger.debug("failed to send response on cluster state processed", e1);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -0,0 +1,158 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gateway;
|
||||
|
||||
import com.google.common.collect.*;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* The dangling indices state is responsible for finding new dangling indices (indices that have
|
||||
* their state written on disk, but don't exist in the metadata of the cluster), and importing
|
||||
* them into the cluster.
|
||||
*/
|
||||
public class DanglingIndicesState extends AbstractComponent {
|
||||
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final MetaStateService metaStateService;
|
||||
private final LocalAllocateDangledIndices allocateDangledIndices;
|
||||
|
||||
private final Map<String, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();
|
||||
|
||||
@Inject
|
||||
public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
|
||||
LocalAllocateDangledIndices allocateDangledIndices) {
|
||||
super(settings);
|
||||
this.nodeEnv = nodeEnv;
|
||||
this.metaStateService = metaStateService;
|
||||
this.allocateDangledIndices = allocateDangledIndices;
|
||||
}
|
||||
|
||||
/**
|
||||
* Process dangling indices based on the provided meta data, handling cleanup, finding
|
||||
* new dangling indices, and allocating outstanding ones.
|
||||
*/
|
||||
public void processDanglingIndices(MetaData metaData) {
|
||||
if (nodeEnv.hasNodeFile() == false) {
|
||||
return;
|
||||
}
|
||||
cleanupAllocatedDangledIndices(metaData);
|
||||
findNewAndAddDanglingIndices(metaData);
|
||||
allocateDanglingIndices();
|
||||
}
|
||||
|
||||
/**
|
||||
* The current set of dangling indices.
|
||||
*/
|
||||
Map<String, IndexMetaData> getDanglingIndices() {
|
||||
return ImmutableMap.copyOf(danglingIndices);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleans dangling indices if they are already allocated on the provided meta data.
|
||||
*/
|
||||
void cleanupAllocatedDangledIndices(MetaData metaData) {
|
||||
for (String danglingIndex : danglingIndices.keySet()) {
|
||||
if (metaData.hasIndex(danglingIndex)) {
|
||||
logger.debug("[{}] no longer dangling (created), removing from dangling list", danglingIndex);
|
||||
danglingIndices.remove(danglingIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds (@{link #findNewAndAddDanglingIndices}) and adds the new dangling indices
|
||||
* to the currently tracked dangling indices.
|
||||
*/
|
||||
void findNewAndAddDanglingIndices(MetaData metaData) {
|
||||
danglingIndices.putAll(findNewDanglingIndices(metaData));
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds new dangling indices by iterating over the indices and trying to find indices
|
||||
* that have state on disk, but are not part of the provided meta data, or not detected
|
||||
* as dangled already.
|
||||
*/
|
||||
Map<String, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
|
||||
final Set<String> indices;
|
||||
try {
|
||||
indices = nodeEnv.findAllIndices();
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to list dangling indices", e);
|
||||
return ImmutableMap.of();
|
||||
}
|
||||
|
||||
Map<String, IndexMetaData> newIndices = Maps.newHashMap();
|
||||
for (String indexName : indices) {
|
||||
if (metaData.hasIndex(indexName) == false && danglingIndices.containsKey(indexName) == false) {
|
||||
try {
|
||||
IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName);
|
||||
if (indexMetaData != null) {
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName);
|
||||
if (!indexMetaData.index().equals(indexName)) {
|
||||
logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index());
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
|
||||
}
|
||||
newIndices.put(indexName, indexMetaData);
|
||||
} else {
|
||||
logger.debug("[{}] dangling index directory detected, but no state found", indexName);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to load index state for detected dangled index", t, indexName);
|
||||
}
|
||||
}
|
||||
}
|
||||
return newIndices;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocates the provided list of the dangled indices by sending them to the master node
|
||||
* for allocation.
|
||||
*/
|
||||
private void allocateDanglingIndices() {
|
||||
if (danglingIndices.isEmpty() == true) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
allocateDangledIndices.allocateDangled(ImmutableList.copyOf(danglingIndices.values()), new LocalAllocateDangledIndices.Listener() {
|
||||
@Override
|
||||
public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
|
||||
logger.trace("allocated dangled");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
logger.info("failed to send allocated dangled", e);
|
||||
}
|
||||
});
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to send allocate dangled", e);
|
||||
}
|
||||
}
|
||||
}
|
@ -19,11 +19,7 @@
|
||||
|
||||
package org.elasticsearch.gateway;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.ElasticsearchIllegalStateException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
@ -41,136 +37,38 @@ import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.env.ShardLock;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class GatewayMetaState extends AbstractComponent implements ClusterStateListener {
|
||||
|
||||
static final String GLOBAL_STATE_FILE_PREFIX = "global-";
|
||||
private static final String INDEX_STATE_FILE_PREFIX = "state-";
|
||||
static final Pattern GLOBAL_STATE_FILE_PATTERN = Pattern.compile(GLOBAL_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?");
|
||||
static final Pattern INDEX_STATE_FILE_PATTERN = Pattern.compile(INDEX_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?");
|
||||
private static final String GLOBAL_STATE_LOG_TYPE = "[_global]";
|
||||
private static final String DEPRECATED_SETTING_ROUTING_HASH_FUNCTION = "cluster.routing.operation.hash.type";
|
||||
private static final String DEPRECATED_SETTING_ROUTING_USE_TYPE = "cluster.routing.operation.use_type";
|
||||
public static final String GATEWAY_DANGLING_TIMEOUT = "gateway.dangling_timeout";
|
||||
public static final String GATEWAY_DELETE_TIMEOUT = "gateway.delete_timeout";
|
||||
public static final String GATEWAY_AUTO_IMPORT_DANGLED = "gateway.auto_import_dangled";
|
||||
// legacy - this used to be in a different package
|
||||
private static final String GATEWAY_LOCAL_DANGLING_TIMEOUT = "gateway.local.dangling_timeout";
|
||||
private static final String GATEWAY_LOCAL_AUTO_IMPORT_DANGLED = "gateway.local.auto_import_dangled";
|
||||
|
||||
static enum AutoImportDangledState {
|
||||
NO() {
|
||||
@Override
|
||||
public boolean shouldImport() {
|
||||
return false;
|
||||
}
|
||||
},
|
||||
YES() {
|
||||
@Override
|
||||
public boolean shouldImport() {
|
||||
return true;
|
||||
}
|
||||
},
|
||||
CLOSED() {
|
||||
@Override
|
||||
public boolean shouldImport() {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
public abstract boolean shouldImport();
|
||||
|
||||
public static AutoImportDangledState fromString(String value) {
|
||||
if ("no".equalsIgnoreCase(value)) {
|
||||
return NO;
|
||||
} else if ("yes".equalsIgnoreCase(value)) {
|
||||
return YES;
|
||||
} else if ("closed".equalsIgnoreCase(value)) {
|
||||
return CLOSED;
|
||||
} else {
|
||||
throw new ElasticsearchIllegalArgumentException("failed to parse [" + value + "], not a valid auto dangling import type");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final ThreadPool threadPool;
|
||||
|
||||
private final LocalAllocateDangledIndices allocateDangledIndices;
|
||||
private final MetaStateService metaStateService;
|
||||
private final DanglingIndicesState danglingIndicesState;
|
||||
|
||||
@Nullable
|
||||
private volatile MetaData currentMetaData;
|
||||
|
||||
private final XContentType format;
|
||||
private final ToXContent.Params formatParams;
|
||||
private final ToXContent.Params gatewayModeFormatParams;
|
||||
|
||||
|
||||
private final AutoImportDangledState autoImportDangled;
|
||||
private final TimeValue danglingTimeout;
|
||||
private final TimeValue deleteTimeout;
|
||||
private final Map<String, DanglingIndex> danglingIndices = ConcurrentCollections.newConcurrentMap();
|
||||
private final Object danglingMutex = new Object();
|
||||
private final IndicesService indicesService;
|
||||
|
||||
@Inject
|
||||
public GatewayMetaState(Settings settings, ThreadPool threadPool, NodeEnvironment nodeEnv,
|
||||
TransportNodesListGatewayMetaState nodesListGatewayMetaState, LocalAllocateDangledIndices allocateDangledIndices,
|
||||
IndicesService indicesService) throws Exception {
|
||||
public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
|
||||
DanglingIndicesState danglingIndicesState, TransportNodesListGatewayMetaState nodesListGatewayMetaState) throws Exception {
|
||||
super(settings);
|
||||
this.nodeEnv = nodeEnv;
|
||||
this.threadPool = threadPool;
|
||||
this.format = XContentType.fromRestContentType(settings.get("format", "smile"));
|
||||
this.allocateDangledIndices = allocateDangledIndices;
|
||||
this.metaStateService = metaStateService;
|
||||
this.danglingIndicesState = danglingIndicesState;
|
||||
nodesListGatewayMetaState.init(this);
|
||||
|
||||
if (this.format == XContentType.SMILE) {
|
||||
Map<String, String> params = Maps.newHashMap();
|
||||
params.put("binary", "true");
|
||||
formatParams = new ToXContent.MapParams(params);
|
||||
Map<String, String> gatewayModeParams = Maps.newHashMap();
|
||||
gatewayModeParams.put("binary", "true");
|
||||
gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY);
|
||||
gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams);
|
||||
} else {
|
||||
formatParams = ToXContent.EMPTY_PARAMS;
|
||||
Map<String, String> gatewayModeParams = Maps.newHashMap();
|
||||
gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY);
|
||||
gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams);
|
||||
}
|
||||
|
||||
this.autoImportDangled = AutoImportDangledState.fromString(settings.get(GATEWAY_AUTO_IMPORT_DANGLED, settings.get(GATEWAY_LOCAL_AUTO_IMPORT_DANGLED, AutoImportDangledState.YES.toString())));
|
||||
this.danglingTimeout = settings.getAsTime(GATEWAY_DANGLING_TIMEOUT, settings.getAsTime(GATEWAY_LOCAL_DANGLING_TIMEOUT, TimeValue.timeValueHours(2)));
|
||||
this.deleteTimeout = settings.getAsTime(GATEWAY_DELETE_TIMEOUT, TimeValue.timeValueSeconds(30));
|
||||
|
||||
logger.debug("using {} [{}], {} [{}], with {} [{}]",
|
||||
GATEWAY_AUTO_IMPORT_DANGLED, this.autoImportDangled,
|
||||
GATEWAY_DELETE_TIMEOUT, this.deleteTimeout,
|
||||
GATEWAY_DANGLING_TIMEOUT, this.danglingTimeout);
|
||||
if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) {
|
||||
nodeEnv.ensureAtomicMoveSupported();
|
||||
}
|
||||
@ -179,18 +77,17 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
ensureNoPre019State();
|
||||
pre20Upgrade();
|
||||
long start = System.currentTimeMillis();
|
||||
loadState();
|
||||
metaStateService.loadFullState();
|
||||
logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
|
||||
} catch (Exception e) {
|
||||
logger.error("failed to read local state, exiting...", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
this.indicesService = indicesService;
|
||||
}
|
||||
|
||||
public MetaData loadMetaState() throws Exception {
|
||||
return loadState();
|
||||
return metaStateService.loadFullState();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -211,7 +108,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
// check if the global state changed?
|
||||
if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) {
|
||||
try {
|
||||
writeGlobalState("changed", newMetaData);
|
||||
metaStateService.writeGlobalState("changed", newMetaData);
|
||||
} catch (Throwable e) {
|
||||
success = false;
|
||||
}
|
||||
@ -224,7 +121,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
if (currentMetaData == null) {
|
||||
// a new event..., check from the state stored
|
||||
try {
|
||||
currentIndexMetaData = loadIndexState(indexMetaData.index());
|
||||
currentIndexMetaData = metaStateService.loadIndexState(indexMetaData.index());
|
||||
} catch (IOException ex) {
|
||||
throw new ElasticsearchException("failed to load index state", ex);
|
||||
}
|
||||
@ -243,198 +140,20 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
}
|
||||
|
||||
try {
|
||||
writeIndex(writeReason, indexMetaData, currentIndexMetaData);
|
||||
metaStateService.writeIndex(writeReason, indexMetaData, currentIndexMetaData);
|
||||
} catch (Throwable e) {
|
||||
success = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle dangling indices, we handle those for all nodes that have a node file (data or master)
|
||||
if (nodeEnv.hasNodeFile()) {
|
||||
if (danglingTimeout.millis() >= 0) {
|
||||
synchronized (danglingMutex) {
|
||||
for (String danglingIndex : danglingIndices.keySet()) {
|
||||
if (newMetaData.hasIndex(danglingIndex)) {
|
||||
logger.debug("[{}] no longer dangling (created), removing", danglingIndex);
|
||||
DanglingIndex removed = danglingIndices.remove(danglingIndex);
|
||||
FutureUtils.cancel(removed.future);
|
||||
}
|
||||
}
|
||||
// delete indices that are no longer part of the metadata
|
||||
try {
|
||||
for (String indexName : nodeEnv.findAllIndices()) {
|
||||
// if we have the index on the metadata, don't delete it
|
||||
if (newMetaData.hasIndex(indexName)) {
|
||||
continue;
|
||||
}
|
||||
if (danglingIndices.containsKey(indexName)) {
|
||||
// already dangling, continue
|
||||
continue;
|
||||
}
|
||||
final IndexMetaData indexMetaData = loadIndexState(indexName);
|
||||
if (indexMetaData != null) {
|
||||
if(autoImportDangled.shouldImport()){
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state [{}]", indexName, autoImportDangled);
|
||||
danglingIndices.put(indexName, new DanglingIndex(indexName, null));
|
||||
} else if (danglingTimeout.millis() == 0) {
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, timeout set to 0, deleting now", indexName);
|
||||
indicesService.deleteIndexStore("dangling index with timeout set to 0", indexMetaData);
|
||||
} else {
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, scheduling to delete in [{}], auto import to cluster state [{}]", indexName, danglingTimeout, autoImportDangled);
|
||||
danglingIndices.put(indexName,
|
||||
new DanglingIndex(indexName,
|
||||
threadPool.schedule(danglingTimeout,
|
||||
ThreadPool.Names.SAME,
|
||||
new RemoveDanglingIndex(indexMetaData))));
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to find dangling indices", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (autoImportDangled.shouldImport() && !danglingIndices.isEmpty()) {
|
||||
final List<IndexMetaData> dangled = Lists.newArrayList();
|
||||
for (String indexName : danglingIndices.keySet()) {
|
||||
IndexMetaData indexMetaData;
|
||||
try {
|
||||
indexMetaData = loadIndexState(indexName);
|
||||
} catch (IOException ex) {
|
||||
throw new ElasticsearchException("failed to load index state", ex);
|
||||
}
|
||||
if (indexMetaData == null) {
|
||||
logger.debug("failed to find state for dangling index [{}]", indexName);
|
||||
continue;
|
||||
}
|
||||
// we might have someone copying over an index, renaming the directory, handle that
|
||||
if (!indexMetaData.index().equals(indexName)) {
|
||||
logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index());
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
|
||||
}
|
||||
if (autoImportDangled == AutoImportDangledState.CLOSED) {
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).build();
|
||||
}
|
||||
if (indexMetaData != null) {
|
||||
dangled.add(indexMetaData);
|
||||
}
|
||||
}
|
||||
IndexMetaData[] dangledIndices = dangled.toArray(new IndexMetaData[dangled.size()]);
|
||||
try {
|
||||
allocateDangledIndices.allocateDangled(dangledIndices, new LocalAllocateDangledIndices.Listener() {
|
||||
@Override
|
||||
public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
|
||||
logger.trace("allocated dangled");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
logger.info("failed to send allocated dangled", e);
|
||||
}
|
||||
});
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to send allocate dangled", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
danglingIndicesState.processDanglingIndices(newMetaData);
|
||||
|
||||
if (success) {
|
||||
currentMetaData = newMetaData;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a StateFormat that can read and write {@link MetaData}
|
||||
*/
|
||||
static MetaDataStateFormat<MetaData> globalStateFormat(XContentType format, final ToXContent.Params formatParams, final boolean deleteOldFiles) {
|
||||
return new MetaDataStateFormat<MetaData>(format, deleteOldFiles) {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
|
||||
MetaData.Builder.toXContent(state, builder, formatParams);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return MetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a StateFormat that can read and write {@link IndexMetaData}
|
||||
*/
|
||||
static MetaDataStateFormat<IndexMetaData> indexStateFormat(XContentType format, final ToXContent.Params formatParams, boolean deleteOldFiles) {
|
||||
return new MetaDataStateFormat<IndexMetaData>(format, deleteOldFiles) {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
|
||||
IndexMetaData.Builder.toXContent(state, builder, formatParams); }
|
||||
|
||||
@Override
|
||||
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return IndexMetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception {
|
||||
logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason);
|
||||
final boolean deleteOldFiles = previousIndexMetaData != null && previousIndexMetaData.version() != indexMetaData.version();
|
||||
final MetaDataStateFormat<IndexMetaData> writer = indexStateFormat(format, formatParams, deleteOldFiles);
|
||||
try {
|
||||
writer.write(indexMetaData, INDEX_STATE_FILE_PREFIX, indexMetaData.version(),
|
||||
nodeEnv.indexPaths(new Index(indexMetaData.index())));
|
||||
} catch (Throwable ex) {
|
||||
logger.warn("[{}]: failed to write index state", ex, indexMetaData.index());
|
||||
throw new IOException("failed to write state for [" + indexMetaData.index() + "]", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private void writeGlobalState(String reason, MetaData metaData) throws Exception {
|
||||
logger.trace("{} writing state, reason [{}]", GLOBAL_STATE_LOG_TYPE, reason);
|
||||
final MetaDataStateFormat<MetaData> writer = globalStateFormat(format, gatewayModeFormatParams, true);
|
||||
try {
|
||||
writer.write(metaData, GLOBAL_STATE_FILE_PREFIX, metaData.version(), nodeEnv.nodeDataPaths());
|
||||
} catch (Throwable ex) {
|
||||
logger.warn("{}: failed to write global state", ex, GLOBAL_STATE_LOG_TYPE);
|
||||
throw new IOException("failed to write global state", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private MetaData loadState() throws Exception {
|
||||
MetaData globalMetaData = loadGlobalState();
|
||||
MetaData.Builder metaDataBuilder;
|
||||
if (globalMetaData != null) {
|
||||
metaDataBuilder = MetaData.builder(globalMetaData);
|
||||
} else {
|
||||
metaDataBuilder = MetaData.builder();
|
||||
}
|
||||
|
||||
final Set<String> indices = nodeEnv.findAllIndices();
|
||||
for (String index : indices) {
|
||||
IndexMetaData indexMetaData = loadIndexState(index);
|
||||
if (indexMetaData == null) {
|
||||
logger.debug("[{}] failed to find metadata for existing index location", index);
|
||||
} else {
|
||||
metaDataBuilder.put(indexMetaData, false);
|
||||
}
|
||||
}
|
||||
return metaDataBuilder.build();
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private IndexMetaData loadIndexState(String index) throws IOException {
|
||||
return MetaDataStateFormat.loadLatestState(logger, indexStateFormat(format, formatParams, true),
|
||||
INDEX_STATE_FILE_PATTERN, "[" + index + "]", nodeEnv.indexPaths(new Index(index)));
|
||||
}
|
||||
|
||||
private MetaData loadGlobalState() throws IOException {
|
||||
return MetaDataStateFormat.loadLatestState(logger, globalStateFormat(format, gatewayModeFormatParams, true), GLOBAL_STATE_FILE_PATTERN, GLOBAL_STATE_LOG_TYPE, nodeEnv.nodeDataPaths());
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Throws an IAE if a pre 0.19 state is detected
|
||||
*/
|
||||
@ -497,7 +216,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
.version(indexMetaData.version())
|
||||
.settings(indexSettings)
|
||||
.build();
|
||||
writeIndex("upgrade", newMetaData, null);
|
||||
metaStateService.writeIndex("upgrade", newMetaData, null);
|
||||
} else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)) {
|
||||
if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null
|
||||
|| indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) {
|
||||
@ -511,41 +230,4 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
+ "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE);
|
||||
}
|
||||
}
|
||||
|
||||
class RemoveDanglingIndex implements Runnable {
|
||||
|
||||
private final IndexMetaData metaData;
|
||||
|
||||
RemoveDanglingIndex(IndexMetaData metaData) {
|
||||
this.metaData = metaData;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
synchronized (danglingMutex) {
|
||||
DanglingIndex remove = danglingIndices.remove(metaData.index());
|
||||
// no longer there...
|
||||
if (remove == null) {
|
||||
return;
|
||||
}
|
||||
logger.warn("[{}] deleting dangling index", metaData.index());
|
||||
try {
|
||||
indicesService.deleteIndexStore("deleting dangling index", metaData);
|
||||
} catch (Exception ex) {
|
||||
logger.debug("failed to delete dangling index", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class DanglingIndex {
|
||||
public final String index;
|
||||
public final ScheduledFuture future;
|
||||
|
||||
DanglingIndex(String index, ScheduledFuture future) {
|
||||
this.index = index;
|
||||
this.future = future;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -28,6 +28,8 @@ public class GatewayModule extends AbstractModule {
|
||||
|
||||
@Override
|
||||
protected void configure() {
|
||||
bind(MetaStateService.class).asEagerSingleton();
|
||||
bind(DanglingIndicesState.class).asEagerSingleton();
|
||||
bind(GatewayService.class).asEagerSingleton();
|
||||
bind(Gateway.class).asEagerSingleton();
|
||||
bind(GatewayShardsState.class).asEagerSingleton();
|
||||
|
@ -40,6 +40,7 @@ import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
||||
/**
|
||||
*/
|
||||
@ -62,14 +63,14 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
|
||||
transportService.registerHandler(ACTION_NAME, new AllocateDangledRequestHandler());
|
||||
}
|
||||
|
||||
public void allocateDangled(IndexMetaData[] indices, final Listener listener) {
|
||||
public void allocateDangled(Collection<IndexMetaData> indices, final Listener listener) {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
DiscoveryNode masterNode = clusterState.nodes().masterNode();
|
||||
if (masterNode == null) {
|
||||
listener.onFailure(new MasterNotDiscoveredException("no master to send allocate dangled request"));
|
||||
return;
|
||||
}
|
||||
AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices);
|
||||
AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices.toArray(new IndexMetaData[indices.size()]));
|
||||
transportService.sendRequest(masterNode, ACTION_NAME, request, new TransportResponseHandler<AllocateDangledResponse>() {
|
||||
@Override
|
||||
public AllocateDangledResponse newInstance() {
|
||||
|
187
src/main/java/org/elasticsearch/gateway/MetaStateService.java
Normal file
187
src/main/java/org/elasticsearch/gateway/MetaStateService.java
Normal file
@ -0,0 +1,187 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gateway;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
* Handles writing and loading both {@link MetaData} and {@link IndexMetaData}
|
||||
*/
|
||||
public class MetaStateService extends AbstractComponent {
|
||||
|
||||
static final String FORMAT_SETTING = "gateway.format";
|
||||
|
||||
static final String GLOBAL_STATE_FILE_PREFIX = "global-";
|
||||
private static final String INDEX_STATE_FILE_PREFIX = "state-";
|
||||
static final Pattern GLOBAL_STATE_FILE_PATTERN = Pattern.compile(GLOBAL_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?");
|
||||
static final Pattern INDEX_STATE_FILE_PATTERN = Pattern.compile(INDEX_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?");
|
||||
private static final String GLOBAL_STATE_LOG_TYPE = "[_global]";
|
||||
|
||||
private final NodeEnvironment nodeEnv;
|
||||
|
||||
private final XContentType format;
|
||||
private final ToXContent.Params formatParams;
|
||||
private final ToXContent.Params gatewayModeFormatParams;
|
||||
|
||||
@Inject
|
||||
public MetaStateService(Settings settings, NodeEnvironment nodeEnv) {
|
||||
super(settings);
|
||||
this.nodeEnv = nodeEnv;
|
||||
this.format = XContentType.fromRestContentType(settings.get(FORMAT_SETTING, "smile"));
|
||||
|
||||
if (this.format == XContentType.SMILE) {
|
||||
Map<String, String> params = Maps.newHashMap();
|
||||
params.put("binary", "true");
|
||||
formatParams = new ToXContent.MapParams(params);
|
||||
Map<String, String> gatewayModeParams = Maps.newHashMap();
|
||||
gatewayModeParams.put("binary", "true");
|
||||
gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY);
|
||||
gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams);
|
||||
} else {
|
||||
formatParams = ToXContent.EMPTY_PARAMS;
|
||||
Map<String, String> gatewayModeParams = Maps.newHashMap();
|
||||
gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY);
|
||||
gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the full state, which includes both the global state and all the indices
|
||||
* meta state.
|
||||
*/
|
||||
MetaData loadFullState() throws Exception {
|
||||
MetaData globalMetaData = loadGlobalState();
|
||||
MetaData.Builder metaDataBuilder;
|
||||
if (globalMetaData != null) {
|
||||
metaDataBuilder = MetaData.builder(globalMetaData);
|
||||
} else {
|
||||
metaDataBuilder = MetaData.builder();
|
||||
}
|
||||
|
||||
final Set<String> indices = nodeEnv.findAllIndices();
|
||||
for (String index : indices) {
|
||||
IndexMetaData indexMetaData = loadIndexState(index);
|
||||
if (indexMetaData == null) {
|
||||
logger.debug("[{}] failed to find metadata for existing index location", index);
|
||||
} else {
|
||||
metaDataBuilder.put(indexMetaData, false);
|
||||
}
|
||||
}
|
||||
return metaDataBuilder.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the index state for the provided index name, returning null if doesn't exists.
|
||||
*/
|
||||
@Nullable
|
||||
IndexMetaData loadIndexState(String index) throws IOException {
|
||||
return MetaDataStateFormat.loadLatestState(logger, indexStateFormat(format, formatParams, true),
|
||||
INDEX_STATE_FILE_PATTERN, "[" + index + "]", nodeEnv.indexPaths(new Index(index)));
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the global state, *without* index state, see {@link #loadFullState()} for that.
|
||||
*/
|
||||
MetaData loadGlobalState() throws IOException {
|
||||
return MetaDataStateFormat.loadLatestState(logger, globalStateFormat(format, gatewayModeFormatParams, true), GLOBAL_STATE_FILE_PATTERN, GLOBAL_STATE_LOG_TYPE, nodeEnv.nodeDataPaths());
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes the index state.
|
||||
*/
|
||||
void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception {
|
||||
logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason);
|
||||
final boolean deleteOldFiles = previousIndexMetaData != null && previousIndexMetaData.version() != indexMetaData.version();
|
||||
final MetaDataStateFormat<IndexMetaData> writer = indexStateFormat(format, formatParams, deleteOldFiles);
|
||||
try {
|
||||
writer.write(indexMetaData, INDEX_STATE_FILE_PREFIX, indexMetaData.version(),
|
||||
nodeEnv.indexPaths(new Index(indexMetaData.index())));
|
||||
} catch (Throwable ex) {
|
||||
logger.warn("[{}]: failed to write index state", ex, indexMetaData.index());
|
||||
throw new IOException("failed to write state for [" + indexMetaData.index() + "]", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes the global state, *without* the indices states.
|
||||
*/
|
||||
void writeGlobalState(String reason, MetaData metaData) throws Exception {
|
||||
logger.trace("{} writing state, reason [{}]", GLOBAL_STATE_LOG_TYPE, reason);
|
||||
final MetaDataStateFormat<MetaData> writer = globalStateFormat(format, gatewayModeFormatParams, true);
|
||||
try {
|
||||
writer.write(metaData, GLOBAL_STATE_FILE_PREFIX, metaData.version(), nodeEnv.nodeDataPaths());
|
||||
} catch (Throwable ex) {
|
||||
logger.warn("{}: failed to write global state", ex, GLOBAL_STATE_LOG_TYPE);
|
||||
throw new IOException("failed to write global state", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a StateFormat that can read and write {@link MetaData}
|
||||
*/
|
||||
static MetaDataStateFormat<MetaData> globalStateFormat(XContentType format, final ToXContent.Params formatParams, final boolean deleteOldFiles) {
|
||||
return new MetaDataStateFormat<MetaData>(format, deleteOldFiles) {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
|
||||
MetaData.Builder.toXContent(state, builder, formatParams);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return MetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a StateFormat that can read and write {@link IndexMetaData}
|
||||
*/
|
||||
static MetaDataStateFormat<IndexMetaData> indexStateFormat(XContentType format, final ToXContent.Params formatParams, boolean deleteOldFiles) {
|
||||
return new MetaDataStateFormat<IndexMetaData>(format, deleteOldFiles) {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
|
||||
IndexMetaData.Builder.toXContent(state, builder, formatParams); }
|
||||
|
||||
@Override
|
||||
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return IndexMetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
@ -489,13 +489,8 @@ public abstract class Engine implements Closeable {
|
||||
/** Check whether the engine should be failed */
|
||||
protected boolean maybeFailEngine(String source, Throwable t) {
|
||||
if (Lucene.isCorruptionException(t)) {
|
||||
if (engineConfig.isFailEngineOnCorruption()) {
|
||||
failEngine("corrupt file detected source: [" + source + "]", t);
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("corrupt file detected source: [{}] but [{}] is set to [{}]", t, source,
|
||||
EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, engineConfig.isFailEngineOnCorruption());
|
||||
}
|
||||
failEngine("corrupt file detected source: [" + source + "]", t);
|
||||
return true;
|
||||
} else if (ExceptionsHelper.isOOM(t)) {
|
||||
failEngine("out of memory", t);
|
||||
return true;
|
||||
|
@ -23,7 +23,6 @@ import org.apache.lucene.codecs.Codec;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
@ -50,9 +49,9 @@ import java.util.concurrent.TimeUnit;
|
||||
*/
|
||||
public final class EngineConfig {
|
||||
private final ShardId shardId;
|
||||
private volatile boolean failOnMergeFailure = true;
|
||||
private volatile boolean failEngineOnCorruption = true;
|
||||
private volatile ByteSizeValue indexingBufferSize;
|
||||
private volatile ByteSizeValue versionMapSize;
|
||||
private volatile String versionMapSizeSetting;
|
||||
private final int indexConcurrency;
|
||||
private volatile boolean compoundOnFlush = true;
|
||||
private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();
|
||||
@ -99,18 +98,6 @@ public final class EngineConfig {
|
||||
*/
|
||||
public static final String INDEX_GC_DELETES_SETTING = "index.gc_deletes";
|
||||
|
||||
/**
|
||||
* Index setting to enable / disable engine failures on merge exceptions. Default is <code>true</code> / <tt>enabled</tt>.
|
||||
* This setting is realtime updateable.
|
||||
*/
|
||||
public static final String INDEX_FAIL_ON_MERGE_FAILURE_SETTING = "index.fail_on_merge_failure";
|
||||
|
||||
/**
|
||||
* Index setting to enable / disable engine failures on detected index corruptions. Default is <code>true</code> / <tt>enabled</tt>.
|
||||
* This setting is realtime updateable.
|
||||
*/
|
||||
public static final String INDEX_FAIL_ON_CORRUPTION_SETTING = "index.fail_on_corruption";
|
||||
|
||||
/**
|
||||
* Index setting to control the initial index buffer size.
|
||||
* This setting is <b>not</b> realtime updateable.
|
||||
@ -123,11 +110,25 @@ public final class EngineConfig {
|
||||
*/
|
||||
public static final String INDEX_CODEC_SETTING = "index.codec";
|
||||
|
||||
/**
|
||||
* Index setting to enable / disable checksum checks on merge
|
||||
* This setting is realtime updateable.
|
||||
*/
|
||||
public static final String INDEX_CHECKSUM_ON_MERGE = "index.checksum_on_merge";
|
||||
|
||||
/**
|
||||
* The maximum size the version map should grow to before issuing a refresh. Can be an absolute value or a percentage of
|
||||
* the current index memory buffer (defaults to 25%)
|
||||
*/
|
||||
public static final String INDEX_VERSION_MAP_SIZE = "index.version_map_size";
|
||||
|
||||
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
|
||||
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
|
||||
public static final ByteSizeValue DEFAUTL_INDEX_BUFFER_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);
|
||||
public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb");
|
||||
|
||||
public static final String DEFAULT_VERSION_MAP_SIZE = "25%";
|
||||
|
||||
private static final String DEFAULT_CODEC_NAME = "default";
|
||||
|
||||
|
||||
@ -155,9 +156,42 @@ public final class EngineConfig {
|
||||
this.indexConcurrency = indexSettings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
|
||||
codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
|
||||
indexingBufferSize = indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAUTL_INDEX_BUFFER_SIZE);
|
||||
failEngineOnCorruption = indexSettings.getAsBoolean(INDEX_FAIL_ON_CORRUPTION_SETTING, true);
|
||||
failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE_SETTING, true);
|
||||
gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
|
||||
versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
|
||||
updateVersionMapSize();
|
||||
}
|
||||
|
||||
/** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */
|
||||
private void updateVersionMapSize() {
|
||||
if (versionMapSizeSetting.endsWith("%")) {
|
||||
double percent = Double.parseDouble(versionMapSizeSetting.substring(0, versionMapSizeSetting.length() - 1));
|
||||
versionMapSize = new ByteSizeValue((long) (((double) indexingBufferSize.bytes() * (percent / 100))));
|
||||
} else {
|
||||
versionMapSize = ByteSizeValue.parseBytesSizeValue(versionMapSizeSetting);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Settings the version map size that should trigger a refresh. See {@link #INDEX_VERSION_MAP_SIZE} for details.
|
||||
*/
|
||||
public void setVersionMapSizeSetting(String versionMapSizeSetting) {
|
||||
this.versionMapSizeSetting = versionMapSizeSetting;
|
||||
updateVersionMapSize();
|
||||
}
|
||||
|
||||
/**
|
||||
* current setting for the version map size that should trigger a refresh. See {@link #INDEX_VERSION_MAP_SIZE} for details.
|
||||
*/
|
||||
public String getVersionMapSizeSetting() {
|
||||
return versionMapSizeSetting;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* returns the size of the version map that should trigger a refresh
|
||||
*/
|
||||
public ByteSizeValue getVersionMapSize() {
|
||||
return versionMapSize;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -165,6 +199,7 @@ public final class EngineConfig {
|
||||
*/
|
||||
public void setIndexingBufferSize(ByteSizeValue indexingBufferSize) {
|
||||
this.indexingBufferSize = indexingBufferSize;
|
||||
updateVersionMapSize();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -176,20 +211,6 @@ public final class EngineConfig {
|
||||
this.enableGcDeletes = enableGcDeletes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the engine should be failed if a merge error is hit. Defaults to <code>true</code>
|
||||
*/
|
||||
public boolean isFailOnMergeFailure() {
|
||||
return failOnMergeFailure;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> if the engine should be failed in the case of a corrupted index. Defaults to <code>true</code>
|
||||
*/
|
||||
public boolean isFailEngineOnCorruption() {
|
||||
return failEngineOnCorruption;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link org.elasticsearch.indices.memory.IndexingMemoryController}
|
||||
*/
|
||||
@ -370,18 +391,4 @@ public final class EngineConfig {
|
||||
public void setCompoundOnFlush(boolean compoundOnFlush) {
|
||||
this.compoundOnFlush = compoundOnFlush;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets if the engine should be failed in the case of a corrupted index. Defaults to <code>true</code>
|
||||
*/
|
||||
public void setFailEngineOnCorruption(boolean failEngineOnCorruption) {
|
||||
this.failEngineOnCorruption = failEngineOnCorruption;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets if the engine should be failed if a merge error is hit. Defaults to <code>true</code>
|
||||
*/
|
||||
public void setFailOnMergeFailure(boolean failOnMergeFailure) {
|
||||
this.failOnMergeFailure = failOnMergeFailure;
|
||||
}
|
||||
}
|
||||
|
@ -24,9 +24,9 @@ import org.elasticsearch.index.shard.ShardId;
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class OptimizeFailedEngineException extends EngineException {
|
||||
public class ForceMergeFailedEngineException extends EngineException {
|
||||
|
||||
public OptimizeFailedEngineException(ShardId shardId, Throwable t) {
|
||||
super(shardId, "Optimize failed", t);
|
||||
public ForceMergeFailedEngineException(ShardId shardId, Throwable t) {
|
||||
super(shardId, "force merge failed", t);
|
||||
}
|
||||
}
|
@ -52,7 +52,6 @@ import org.elasticsearch.indices.IndicesWarmer;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
@ -83,11 +82,11 @@ public class InternalEngine extends Engine {
|
||||
private final SearcherFactory searcherFactory;
|
||||
private final SearcherManager searcherManager;
|
||||
|
||||
private final AtomicBoolean optimizeMutex = new AtomicBoolean();
|
||||
// we use flushNeeded here, since if there are no changes, then the commit won't write
|
||||
// will not really happen, and then the commitUserData and the new translog will not be reflected
|
||||
private volatile boolean flushNeeded = false;
|
||||
private final Lock flushLock = new ReentrantLock();
|
||||
private final ReentrantLock optimizeLock = new ReentrantLock();
|
||||
|
||||
protected final FlushingRecoveryCounter onGoingRecoveries;
|
||||
// A uid (in the form of BytesRef) to the version map
|
||||
@ -203,7 +202,7 @@ public class InternalEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public GetResult get(Get get) throws EngineException {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (get.realtime()) {
|
||||
VersionValue versionValue = versionMap.getUnderLock(get.uid().bytes());
|
||||
@ -232,7 +231,7 @@ public class InternalEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public void create(Create create) throws EngineException {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (create.origin() == Operation.Origin.RECOVERY) {
|
||||
// Don't throttle recovery operations
|
||||
@ -338,7 +337,7 @@ public class InternalEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public void index(Index index) throws EngineException {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (index.origin() == Operation.Origin.RECOVERY) {
|
||||
// Don't throttle recovery operations
|
||||
@ -357,11 +356,10 @@ public class InternalEngine extends Engine {
|
||||
}
|
||||
|
||||
/**
|
||||
* Forces a refresh if the versionMap is using too much RAM (currently > 25% of IndexWriter's RAM buffer).
|
||||
* Forces a refresh if the versionMap is using too much RAM
|
||||
*/
|
||||
private void checkVersionMapRefresh() {
|
||||
// TODO: we force refresh when versionMap is using > 25% of IW's RAM buffer; should we make this separately configurable?
|
||||
if (versionMap.ramBytesUsedForRefresh() > 0.25 * engineConfig.getIndexingBufferSize().bytes() && versionMapRefreshPending.getAndSet(true) == false) {
|
||||
if (versionMap.ramBytesUsedForRefresh() > config().getVersionMapSize().bytes() && versionMapRefreshPending.getAndSet(true) == false) {
|
||||
try {
|
||||
if (isClosed.get()) {
|
||||
// no point...
|
||||
@ -438,8 +436,9 @@ public class InternalEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public void delete(Delete delete) throws EngineException {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
// NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
|
||||
innerDelete(delete);
|
||||
flushNeeded = true;
|
||||
} catch (OutOfMemoryError | IllegalStateException | IOException t) {
|
||||
@ -505,8 +504,21 @@ public class InternalEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public void delete(DeleteByQuery delete) throws EngineException {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (delete.origin() == Operation.Origin.RECOVERY) {
|
||||
// Don't throttle recovery operations
|
||||
innerDelete(delete);
|
||||
} else {
|
||||
try (Releasable r = throttle.acquireThrottle()) {
|
||||
innerDelete(delete);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void innerDelete(DeleteByQuery delete) throws EngineException {
|
||||
try {
|
||||
Query query;
|
||||
if (delete.nested() && delete.aliasFilter() != null) {
|
||||
query = new IncludeNestedDocsQuery(new FilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter());
|
||||
@ -535,7 +547,7 @@ public class InternalEngine extends Engine {
|
||||
public void refresh(String source) throws EngineException {
|
||||
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
|
||||
// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
updateIndexWriterSettings();
|
||||
searcherManager.maybeRefreshBlocking();
|
||||
@ -580,7 +592,7 @@ public class InternalEngine extends Engine {
|
||||
* Thread 1: flushes via API and gets the flush lock but blocks on the readlock since Thread 2 has the writeLock
|
||||
* Thread 2: flushes at the end of the recovery holding the writeLock and blocks on the flushLock owned by Thread 1
|
||||
*/
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
updateIndexWriterSettings();
|
||||
if (flushLock.tryLock() == false) {
|
||||
@ -640,8 +652,15 @@ public class InternalEngine extends Engine {
|
||||
}
|
||||
|
||||
}
|
||||
// reread the last committed segment infos
|
||||
/*
|
||||
* we have to inc-ref the store here since if the engine is closed by a tragic event
|
||||
* we don't acquire the write lock and wait until we have exclusive access. This might also
|
||||
* dec the store reference which can essentially close the store and unless we can inc the reference
|
||||
* we can't use it.
|
||||
*/
|
||||
store.incRef();
|
||||
try {
|
||||
// reread the last committed segment infos
|
||||
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
|
||||
} catch (Throwable e) {
|
||||
if (isClosed.get() == false) {
|
||||
@ -650,6 +669,8 @@ public class InternalEngine extends Engine {
|
||||
throw new FlushFailedEngineException(shardId, e);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
store.decRef();
|
||||
}
|
||||
} catch (FlushFailedEngineException ex) {
|
||||
maybeFailEngine("flush", ex);
|
||||
@ -688,58 +709,59 @@ public class InternalEngine extends Engine {
|
||||
lastDeleteVersionPruneTimeMSec = timeMSec;
|
||||
}
|
||||
|
||||
// TODO: can we please remove this method?!
|
||||
private void waitForMerges(boolean flushAfter, boolean upgrade) {
|
||||
try {
|
||||
Method method = IndexWriter.class.getDeclaredMethod("waitForMerges");
|
||||
method.setAccessible(true);
|
||||
method.invoke(indexWriter);
|
||||
} catch (ReflectiveOperationException e) {
|
||||
throw new OptimizeFailedEngineException(shardId, e);
|
||||
}
|
||||
if (flushAfter) {
|
||||
flush(true, true, true);
|
||||
}
|
||||
if (upgrade) {
|
||||
logger.info("Finished upgrade of " + shardId);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, final boolean upgrade) throws EngineException {
|
||||
if (optimizeMutex.compareAndSet(false, true)) {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
/*
|
||||
* The way we implement upgrades is a bit hackish in the sense that we set an instance
|
||||
* variable and that this setting will thus apply to the next forced merge that will be run.
|
||||
* This is ok because (1) this is the only place we call forceMerge, (2) we have a single
|
||||
* thread for optimize, and the 'optimizeMutex' guarding this code, and (3) ConcurrentMergeScheduler
|
||||
* syncs calls to findForcedMerges.
|
||||
*/
|
||||
MergePolicy mp = indexWriter.getConfig().getMergePolicy();
|
||||
assert mp instanceof ElasticsearchMergePolicy : "MergePolicy is " + mp.getClass().getName();
|
||||
if (upgrade) {
|
||||
logger.info("Starting upgrade of " + shardId);
|
||||
((ElasticsearchMergePolicy) mp).setUpgradeInProgress(true);
|
||||
}
|
||||
|
||||
/*
|
||||
* We do NOT acquire the readlock here since we are waiting on the merges to finish
|
||||
* that's fine since the IW.rollback should stop all the threads and trigger an IOException
|
||||
* causing us to fail the forceMerge
|
||||
*
|
||||
* The way we implement upgrades is a bit hackish in the sense that we set an instance
|
||||
* variable and that this setting will thus apply to the next forced merge that will be run.
|
||||
* This is ok because (1) this is the only place we call forceMerge, (2) we have a single
|
||||
* thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler
|
||||
* syncs calls to findForcedMerges.
|
||||
*/
|
||||
assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy : "MergePolicy is " + indexWriter.getConfig().getMergePolicy().getClass().getName();
|
||||
ElasticsearchMergePolicy mp = (ElasticsearchMergePolicy) indexWriter.getConfig().getMergePolicy();
|
||||
optimizeLock.lock();
|
||||
try {
|
||||
ensureOpen();
|
||||
if (upgrade) {
|
||||
logger.info("starting segment upgrade");
|
||||
mp.setUpgradeInProgress(true);
|
||||
}
|
||||
store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize
|
||||
try {
|
||||
if (onlyExpungeDeletes) {
|
||||
indexWriter.forceMergeDeletes(false);
|
||||
assert upgrade == false;
|
||||
indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
|
||||
} else if (maxNumSegments <= 0) {
|
||||
assert upgrade == false;
|
||||
indexWriter.maybeMerge();
|
||||
} else {
|
||||
indexWriter.forceMerge(maxNumSegments, false);
|
||||
indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/);
|
||||
}
|
||||
if (flush) {
|
||||
flush(true, true, true);
|
||||
}
|
||||
if (upgrade) {
|
||||
logger.info("finished segment upgrade");
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
maybeFailEngine("optimize", t);
|
||||
throw new OptimizeFailedEngineException(shardId, t);
|
||||
} finally {
|
||||
optimizeMutex.set(false);
|
||||
store.decRef();
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
ForceMergeFailedEngineException ex = new ForceMergeFailedEngineException(shardId, t);
|
||||
maybeFailEngine("force merge", ex);
|
||||
throw ex;
|
||||
} finally {
|
||||
try {
|
||||
mp.setUpgradeInProgress(false); // reset it just to make sure we reset it in a case of an error
|
||||
} finally {
|
||||
optimizeLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
waitForMerges(flush, upgrade);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -747,7 +769,7 @@ public class InternalEngine extends Engine {
|
||||
// we have to flush outside of the readlock otherwise we might have a problem upgrading
|
||||
// the to a write lock when we fail the engine in this operation
|
||||
flush(false, false, true);
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
return deletionPolicy.snapshot();
|
||||
} catch (IOException e) {
|
||||
@ -759,7 +781,7 @@ public class InternalEngine extends Engine {
|
||||
public void recover(RecoveryHandler recoveryHandler) throws EngineException {
|
||||
// take a write lock here so it won't happen while a flush is in progress
|
||||
// this means that next commits will not be allowed once the lock is released
|
||||
try (ReleasableLock _ = writeLock.acquire()) {
|
||||
try (ReleasableLock lock = writeLock.acquire()) {
|
||||
ensureOpen();
|
||||
onGoingRecoveries.startRecovery();
|
||||
}
|
||||
@ -848,7 +870,7 @@ public class InternalEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public List<Segment> segments(boolean verbose) {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);
|
||||
|
||||
// fill in the merges flag
|
||||
@ -1070,12 +1092,8 @@ public class InternalEngine extends Engine {
|
||||
@Override
|
||||
public void onFailedMerge(MergePolicy.MergeException e) {
|
||||
if (Lucene.isCorruptionException(e)) {
|
||||
if (engineConfig.isFailEngineOnCorruption()) {
|
||||
failEngine("corrupt file detected source: [merge]", e);
|
||||
} else {
|
||||
logger.warn("corrupt file detected source: [merge] but [{}] is set to [{}]", e, EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, engineConfig.isFailEngineOnCorruption());
|
||||
}
|
||||
} else if (engineConfig.isFailOnMergeFailure()) {
|
||||
failEngine("corrupt file detected source: [merge]", e);
|
||||
} else {
|
||||
failEngine("merge exception", e);
|
||||
}
|
||||
}
|
||||
|
@ -123,7 +123,15 @@ public class ShadowEngine extends Engine {
|
||||
logger.trace("skipping FLUSH on shadow engine");
|
||||
// reread the last committed segment infos
|
||||
refresh("flush");
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
/*
|
||||
* we have to inc-ref the store here since if the engine is closed by a tragic event
|
||||
* we don't acquire the write lock and wait until we have exclusive access. This might also
|
||||
* dec the store reference which can essentially close the store and unless we can inc the reference
|
||||
* we can't use it.
|
||||
*/
|
||||
store.incRef();
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
// reread the last committed segment infos
|
||||
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
|
||||
} catch (Throwable e) {
|
||||
if (isClosed.get() == false) {
|
||||
@ -132,6 +140,8 @@ public class ShadowEngine extends Engine {
|
||||
throw new FlushFailedEngineException(shardId, e);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
store.decRef();
|
||||
}
|
||||
}
|
||||
|
||||
@ -149,7 +159,7 @@ public class ShadowEngine extends Engine {
|
||||
|
||||
@Override
|
||||
public List<Segment> segments(boolean verbose) {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);
|
||||
for (int i = 0; i < segmentsArr.length; i++) {
|
||||
// hard code all segments as committed, because they are in
|
||||
@ -164,7 +174,7 @@ public class ShadowEngine extends Engine {
|
||||
public void refresh(String source) throws EngineException {
|
||||
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
|
||||
// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
searcherManager.maybeRefreshBlocking();
|
||||
} catch (AlreadyClosedException e) {
|
||||
|
@ -191,6 +191,8 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl
|
||||
|
||||
if (recoveringTranslogFile == null || Files.exists(recoveringTranslogFile) == false) {
|
||||
// no translog files, bail
|
||||
recoveryState.getTranslog().totalOperations(0);
|
||||
recoveryState.getTranslog().totalOperationsOnStart(0);
|
||||
indexShard.finalizeRecovery();
|
||||
indexShard.postRecovery("post recovery from gateway, no translog");
|
||||
// no index, just start the shard and bail
|
||||
@ -236,7 +238,7 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl
|
||||
typesToUpdate.add(potentialIndexOperation.docMapper().type());
|
||||
}
|
||||
}
|
||||
recoveryState.getTranslog().addTranslogOperations(1);
|
||||
recoveryState.getTranslog().incrementRecoveredOperations();
|
||||
} catch (ElasticsearchException e) {
|
||||
if (e.status() == RestStatus.BAD_REQUEST) {
|
||||
// mainly for MapperParsingException and Failure to detect xcontent
|
||||
|
@ -114,13 +114,10 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
|
||||
shardGateway.recover(indexShouldExists, recoveryState);
|
||||
}
|
||||
|
||||
// Check that the gateway have set the shard to POST_RECOVERY. Note that if a shard
|
||||
// is in POST_RECOVERY, it may have been started as well if:
|
||||
// 1) master sent a new cluster state indicating shard is initializing
|
||||
// 2) IndicesClusterStateService#applyInitializingShard will send a shard started event
|
||||
// 3) Master will mark shard as started and this will be processed locally.
|
||||
// Check that the gateway didn't leave the shard in init or recovering stage. it is up to the gateway
|
||||
// to call post recovery.
|
||||
IndexShardState shardState = indexShard.state();
|
||||
assert shardState == IndexShardState.POST_RECOVERY || shardState == IndexShardState.STARTED : "recovery process didn't call post_recovery. shardState [" + shardState + "]";
|
||||
assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
@ -135,7 +132,7 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
|
||||
.append(new ByteSizeValue(index.reusedBytes())).append("]\n");
|
||||
sb.append(" start : took [").append(TimeValue.timeValueMillis(recoveryState.getStart().time())).append("], check_index [")
|
||||
.append(timeValueMillis(recoveryState.getStart().checkIndexTime())).append("]\n");
|
||||
sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().currentTranslogOperations())
|
||||
sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
|
||||
.append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
|
@ -37,6 +37,7 @@ import org.apache.lucene.search.Filter;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.store.ByteArrayDataOutput;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Explicit;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
@ -235,7 +236,7 @@ public abstract class NumberFieldMapper<T extends Number> extends AbstractFieldM
|
||||
RuntimeException e = null;
|
||||
try {
|
||||
innerParseCreateField(context, fields);
|
||||
} catch (IllegalArgumentException e1) {
|
||||
} catch (IllegalArgumentException | ElasticsearchIllegalArgumentException e1) {
|
||||
e = e1;
|
||||
} catch (MapperParsingException e2) {
|
||||
e = e2;
|
||||
|
@ -139,9 +139,11 @@ public class ConcurrentMergeSchedulerProvider extends MergeSchedulerProvider {
|
||||
|
||||
@Override
|
||||
protected void handleMergeException(Directory dir, Throwable exc) {
|
||||
logger.warn("failed to merge", exc);
|
||||
logger.error("failed to merge", exc);
|
||||
provider.failedMerge(new MergePolicy.MergeException(exc, dir));
|
||||
super.handleMergeException(dir, exc);
|
||||
// NOTE: do not call super.handleMergeException here, which would just re-throw the exception
|
||||
// and let Java's thread exc handler see it / log it to stderr, but we already 1) logged it
|
||||
// and 2) handled the exception by failing the engine
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -86,7 +86,7 @@ public class BoolQueryBuilder extends BaseQueryBuilder implements BoostableQuery
|
||||
}
|
||||
|
||||
/**
|
||||
* Disables <tt>Similarity#coord(int,int)</tt> in scoring. Defualts to <tt>false</tt>.
|
||||
* Disables <tt>Similarity#coord(int,int)</tt> in scoring. Defaults to <tt>false</tt>.
|
||||
*/
|
||||
public BoolQueryBuilder disableCoord(boolean disableCoord) {
|
||||
this.disableCoord = disableCoord;
|
||||
|
@ -30,7 +30,6 @@ import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.mapper.object.ObjectMapper;
|
||||
import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper;
|
||||
import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
|
||||
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
|
||||
@ -149,8 +148,7 @@ public class NestedQueryParser implements QueryParser {
|
||||
}
|
||||
|
||||
if (innerHits != null) {
|
||||
ObjectMapper parentObjectMapper = childDocumentMapper.findParentObjectMapper(nestedObjectMapper);
|
||||
InnerHitsContext.NestedInnerHits nestedInnerHits = new InnerHitsContext.NestedInnerHits(innerHits.v2(), getInnerQuery(), null, parentObjectMapper, nestedObjectMapper);
|
||||
InnerHitsContext.NestedInnerHits nestedInnerHits = new InnerHitsContext.NestedInnerHits(innerHits.v2(), getInnerQuery(), null, getParentObjectMapper(), nestedObjectMapper);
|
||||
String name = innerHits.v1() != null ? innerHits.v1() : path;
|
||||
parseContext.addInnerHits(name, nestedInnerHits);
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ public class ScriptFilterParser implements FilterParser {
HashedBytesRef cacheKey = null;
// also, when caching, since its isCacheable is false, will result in loading all bit set...
String script = null;
String scriptLang = null;
String scriptLang;
Map<String, Object> params = null;

String filterName = null;

@ -130,12 +130,9 @@ public class ScriptFilterParser implements FilterParser {

private final SearchScript searchScript;

private final ScriptService.ScriptType scriptType;

public ScriptFilter(String scriptLang, String script, ScriptService.ScriptType scriptType, Map<String, Object> params, ScriptService scriptService, SearchLookup searchLookup) {
this.script = script;
this.params = params;
this.scriptType = scriptType;
this.searchScript = scriptService.search(searchLookup, scriptLang, script, scriptType, newHashMap(params));
}
@ -21,7 +21,6 @@ package org.elasticsearch.index.query;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.support.ValuesSource;

import java.io.IOException;
import java.util.Map;
@ -19,17 +19,15 @@
package org.elasticsearch.index.query;

import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;

import java.io.IOException;
import java.util.HashMap;

@ -52,9 +50,9 @@ public class TemplateQueryParser implements QueryParser {

private final static Map<String,ScriptService.ScriptType> parametersToTypes = new HashMap<>();
static {
parametersToTypes.put("query",ScriptService.ScriptType.INLINE);
parametersToTypes.put("file",ScriptService.ScriptType.FILE);
parametersToTypes.put("id",ScriptService.ScriptType.INDEXED);
parametersToTypes.put("query", ScriptService.ScriptType.INLINE);
parametersToTypes.put("file", ScriptService.ScriptType.FILE);
parametersToTypes.put("id", ScriptService.ScriptType.INDEXED);
}

@Inject

@ -78,15 +76,14 @@ public class TemplateQueryParser implements QueryParser {
public Query parse(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
TemplateContext templateContext = parse(parser, PARAMS, parametersToTypes);
ExecutableScript executable = this.scriptService.executable("mustache", templateContext.template(), templateContext.scriptType(), templateContext.params());
ExecutableScript executable = this.scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), templateContext.params());

BytesReference querySource = (BytesReference) executable.run();

try (XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource)) {
final QueryParseContext context = new QueryParseContext(parseContext.index(), parseContext.indexQueryParserService());
context.reset(qSourceParser);
Query result = context.parseInnerQuery();
return result;
return context.parseInnerQuery();
}
}
@ -56,7 +56,6 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser {
public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
String script = null;
String scriptLang = null;
Map<String, Object> vars = null;
ScriptService.ScriptType scriptType = null;
String currentFieldName = null;

@ -82,15 +81,13 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser {
script = scriptValue.script();
scriptType = scriptValue.scriptType();
}
scriptLang = scriptParameterParser.lang();

if (script == null) {
throw new QueryParsingException(parseContext.index(), NAMES[0] + " requires 'script' field");
}

SearchScript searchScript;
try {
searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptLang, script, scriptType, vars);
searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptParameterParser.lang(), script, scriptType, vars);
return new ScriptScoreFunction(script, vars, searchScript);
} catch (Exception e) {
throw new QueryParsingException(parseContext.index(), NAMES[0] + " the script could not be loaded", e);
@ -60,6 +60,7 @@ public class NestedInnerQueryParseSupport {

protected DocumentMapper childDocumentMapper;
protected ObjectMapper nestedObjectMapper;
private ObjectMapper parentObjectMapper;

public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) {
parseContext = searchContext.queryParserService().getParseContext();

@ -187,6 +188,10 @@ public class NestedInnerQueryParseSupport {
return filterFound;
}

public ObjectMapper getParentObjectMapper() {
return parentObjectMapper;
}

private void setPathLevel() {
ObjectMapper objectMapper = parseContext.nestedScope().getObjectMapper();
if (objectMapper == null) {

@ -195,7 +200,7 @@ public class NestedInnerQueryParseSupport {
parentFilter = parseContext.bitsetFilter(objectMapper.nestedTypeFilter());
}
childFilter = parseContext.bitsetFilter(nestedObjectMapper.nestedTypeFilter());
parseContext.nestedScope().nextLevel(nestedObjectMapper);
parentObjectMapper = parseContext.nestedScope().nextLevel(nestedObjectMapper);
}

private void resetPathLevel() {
@ -39,17 +39,19 @@ public final class NestedScope {
}

/**
* Sets the new current nested level and moves old current nested level down
* Sets the new current nested level and pushes old current nested level down the stack returns that level.
*/
public void nextLevel(ObjectMapper level) {
public ObjectMapper nextLevel(ObjectMapper level) {
ObjectMapper previous = levelStack.peek();
levelStack.push(level);
return previous;
}

/**
* Sets the previous nested level as current nested level and removes the current nested level.
* Sets the previous nested level as current nested level and removes and returns the current nested level.
*/
public void previousLevel() {
ObjectMapper level = levelStack.pop();
public ObjectMapper previousLevel() {
return levelStack.pop();
}

}
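NestedScope.nextLevel now returns the previously current level so that NestedInnerQueryParseSupport can remember its parent object mapper. A toy sketch of the same stack discipline, using plain strings instead of ObjectMapper:

import java.util.ArrayDeque;
import java.util.Deque;

public class NestedScopeSketch {
    private final Deque<String> levelStack = new ArrayDeque<>();

    // nextLevel returns the previous current level (null for the outermost scope)
    public String nextLevel(String level) {
        String previous = levelStack.peek();
        levelStack.push(level);
        return previous;
    }

    public String previousLevel() {
        return levelStack.pop();
    }

    public static void main(String[] args) {
        NestedScopeSketch scope = new NestedScopeSketch();
        System.out.println(scope.nextLevel("comments"));         // null - no parent yet
        System.out.println(scope.nextLevel("comments.replies")); // comments
        System.out.println(scope.previousLevel());               // comments.replies
    }
}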
@ -39,9 +39,9 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.store.support.AbstractIndexStore;
import org.elasticsearch.index.translog.TranslogService;
import org.elasticsearch.index.translog.fs.FsTranslog;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.indices.IndicesWarmer;

/**
*/

@ -85,9 +85,8 @@ public class IndexDynamicSettingsModule extends AbstractModule {
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_COMPOUND_ON_FLUSH, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_GC_DELETES_SETTING, Validator.TIME);
indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(IndexShard.INDEX_FLUSH_ON_CLOSE, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_VERSION_MAP_SIZE, Validator.BYTES_SIZE_OR_PERCENTAGE);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
@ -46,7 +46,6 @@ import org.elasticsearch.common.Preconditions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.Lucene;

@ -222,7 +221,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.codecService = codecService;
this.shardSuggestService = shardSuggestService;
this.shardBitsetFilterCache = shardBitsetFilterCache;
assert clusterService.lifecycleState() == Lifecycle.State.STARTED; // otherwise localNode is still none;
assert clusterService.localNode() != null : "Local node is null lifecycle state is: " + clusterService.lifecycleState();
this.localNode = clusterService.localNode();
state = IndexShardState.CREATED;
this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, EngineConfig.DEFAULT_REFRESH_INTERVAL);

@ -960,7 +959,8 @@ public class IndexShard extends AbstractIndexShardComponent {
public void updateBufferSize(ByteSizeValue shardIndexingBufferSize, ByteSizeValue shardTranslogBufferSize) {
ByteSizeValue preValue = config.getIndexingBufferSize();
config.setIndexingBufferSize(shardIndexingBufferSize);
if (preValue.bytes() != shardIndexingBufferSize.bytes()) {
// update engine if it is already started.
if (preValue.bytes() != shardIndexingBufferSize.bytes() && engineUnsafe() != null) {
// its inactive, make sure we do a refresh / full IW flush in this case, since the memory
// changes only after a "data" change has happened to the writer
// the index writer lazily allocates memory and a refresh will clean it all up.

@ -1029,18 +1029,9 @@ public class IndexShard extends AbstractIndexShardComponent {
config.setCompoundOnFlush(compoundOnFlush);
change = true;
}

final boolean failEngineOnCorruption = settings.getAsBoolean(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption());
if (failEngineOnCorruption != config.isFailEngineOnCorruption()) {
logger.info("updating {} from [{}] to [{}]", EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption(), failEngineOnCorruption);
config.setFailEngineOnCorruption(failEngineOnCorruption);
change = true;
}
final boolean failOnMergeFailure = settings.getAsBoolean(EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure());
if (failOnMergeFailure != config.isFailOnMergeFailure()) {
logger.info("updating {} from [{}] to [{}]", EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure(), failOnMergeFailure);
config.setFailOnMergeFailure(failOnMergeFailure);
change = true;
final String versionMapSize = settings.get(EngineConfig.INDEX_VERSION_MAP_SIZE, config.getVersionMapSizeSetting());
if (config.getVersionMapSizeSetting().equals(versionMapSize) == false) {
config.setVersionMapSizeSetting(versionMapSize);
}
}
if (change) {

@ -1175,13 +1166,17 @@ public class IndexShard extends AbstractIndexShardComponent {
}

public Engine engine() {
Engine engine = this.currentEngineReference.get();
Engine engine = engineUnsafe();
if (engine == null) {
throw new EngineClosedException(shardId);
}
return engine;
}

protected Engine engineUnsafe() {
return this.currentEngineReference.get();
}

class ShardEngineFailListener implements Engine.FailedEngineListener {
private final CopyOnWriteArrayList<Engine.FailedEngineListener> delegates = new CopyOnWriteArrayList<>();
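The engine()/engineUnsafe() split above gives callers a strict accessor that throws when the engine is closed and a lenient one that may return null. A self-contained sketch of that pattern (Engine and EngineClosedException here are stand-ins, not the Elasticsearch types):

import java.util.concurrent.atomic.AtomicReference;

public class EngineAccessSketch {
    static class Engine {}
    static class EngineClosedException extends RuntimeException {}

    private final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();

    // Strict accessor: callers that require a started engine get an exception otherwise.
    public Engine engine() {
        Engine engine = engineUnsafe();
        if (engine == null) {
            throw new EngineClosedException();
        }
        return engine;
    }

    // Lenient accessor: internal callers (e.g. settings updates) can probe and skip work if absent.
    protected Engine engineUnsafe() {
        return currentEngineReference.get();
    }

    public static void main(String[] args) {
        EngineAccessSketch shard = new EngineAccessSketch();
        System.out.println("engineUnsafe(): " + shard.engineUnsafe()); // null, no exception
        try {
            shard.engine();
        } catch (EngineClosedException e) {
            System.out.println("engine() threw as expected");
        }
    }
}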
@ -115,6 +115,8 @@ public class IndexShardSnapshotAndRestoreService extends AbstractIndexShardCompo
logger.trace("[{}] restoring shard [{}]", restoreSource.snapshotId(), shardId);
}
try {
recoveryState.getTranslog().totalOperations(0);
recoveryState.getTranslog().totalOperationsOnStart(0);
indexShard.prepareForIndexRecovery();
IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
ShardId snapshotShardId = shardId;
@ -21,6 +21,7 @@ package org.elasticsearch.index.translog;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

@ -35,9 +36,12 @@ public class TranslogStats implements ToXContent, Streamable {
private long translogSizeInBytes = 0;
private int estimatedNumberOfOperations = 0;

public TranslogStats() {}
public TranslogStats() {
}

public TranslogStats(int estimatedNumberOfOperations, long translogSizeInBytes) {
assert estimatedNumberOfOperations >= 0 : "estimatedNumberOfOperations must be >=0, got [" + estimatedNumberOfOperations + "]";
assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >=0, got [" + translogSizeInBytes + "]";
this.estimatedNumberOfOperations = estimatedNumberOfOperations;
this.translogSizeInBytes = translogSizeInBytes;
}

@ -48,7 +52,15 @@ public class TranslogStats implements ToXContent, Streamable {
}

this.estimatedNumberOfOperations += translogStats.estimatedNumberOfOperations;
this.translogSizeInBytes =+ translogStats.translogSizeInBytes;
this.translogSizeInBytes = +translogStats.translogSizeInBytes;
}

public ByteSizeValue translogSizeInBytes() {
return new ByteSizeValue(translogSizeInBytes);
}

public long estimatedNumberOfOperations() {
return estimatedNumberOfOperations;
}

@Override

@ -70,10 +82,12 @@ public class TranslogStats implements ToXContent, Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
estimatedNumberOfOperations = in.readVInt();
translogSizeInBytes = in.readVLong();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(estimatedNumberOfOperations);
out.writeVLong(translogSizeInBytes);
}
}
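In the TranslogStats hunk the ops counter is accumulated with +=, while the size field is written as "= +", i.e. assigned with a unary plus rather than summed. For comparison, a sketch of a merge method in which both counters accumulate (a hypothetical standalone class, not the shipped code):

public class TranslogStatsSketch {
    private int estimatedNumberOfOperations;
    private long translogSizeInBytes;

    public TranslogStatsSketch(int estimatedNumberOfOperations, long translogSizeInBytes) {
        this.estimatedNumberOfOperations = estimatedNumberOfOperations;
        this.translogSizeInBytes = translogSizeInBytes;
    }

    // Accumulating merge: both counters use +=, unlike the "= +" assignment in the hunk above.
    public void add(TranslogStatsSketch other) {
        this.estimatedNumberOfOperations += other.estimatedNumberOfOperations;
        this.translogSizeInBytes += other.translogSizeInBytes;
    }

    public static void main(String[] args) {
        TranslogStatsSketch total = new TranslogStatsSketch(10, 1_000L);
        total.add(new TranslogStatsSketch(5, 500L));
        System.out.println(total.estimatedNumberOfOperations + " ops, " + total.translogSizeInBytes + " bytes"); // 15 ops, 1500 bytes
    }
}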
@ -439,7 +439,12 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog

@Override
public TranslogStats stats() {
return new TranslogStats(estimatedNumberOfOperations(), translogSizeInBytes());
FsTranslogFile current = this.current;
if (current == null) {
return new TranslogStats(0, 0);
}

return new TranslogStats(current.estimatedNumberOfOperations(), current.translogSizeInBytes());
}

@Override
@ -118,7 +118,6 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i

private final PluginsService pluginsService;
private final NodeEnvironment nodeEnv;
private final ClusterService clusterService;

private volatile Map<String, Tuple<IndexService, Injector>> indices = ImmutableMap.of();
private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>();

@ -126,10 +125,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
private final OldShardsStats oldShardsStats = new OldShardsStats();

@Inject
public IndicesService(Settings settings, IndicesLifecycle indicesLifecycle, IndicesAnalysisService indicesAnalysisService, Injector injector, NodeEnvironment nodeEnv, ClusterService clusterService) {
public IndicesService(Settings settings, IndicesLifecycle indicesLifecycle, IndicesAnalysisService indicesAnalysisService, Injector injector, NodeEnvironment nodeEnv) {
super(settings);
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
this.clusterService = clusterService;
this.indicesAnalysisService = indicesAnalysisService;
this.injector = injector;
this.pluginsService = injector.getInstance(PluginsService.class);

@ -447,16 +445,15 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
removeIndex(index, reason, true);
}

public void deleteClosedIndex(String reason, IndexMetaData metaData) {
public void deleteClosedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
if (nodeEnv.hasNodeFile()) {
String indexName = metaData.getIndex();
try {
ClusterState clusterState = clusterService.state();
if (clusterState.metaData().hasIndex(indexName)) {
final IndexMetaData index = clusterState.metaData().index(indexName);
throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]");
}
deleteIndexStore(reason, metaData);
deleteIndexStore(reason, metaData, clusterState);
} catch (IOException e) {
logger.warn("[{}] failed to delete closed index", e, metaData.index());
}

@ -467,16 +464,17 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
* Deletes the index store trying to acquire all shards locks for this index.
* This method will delete the metadata for the index even if the actual shards can't be locked.
*/
public void deleteIndexStore(String reason, IndexMetaData metaData) throws IOException {
public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState) throws IOException {
if (nodeEnv.hasNodeFile()) {
synchronized (this) {
String indexName = metaData.index();
if (indices.containsKey(metaData.index())) {
String localUUid = indices.get(metaData.index()).v1().indexUUID();
throw new ElasticsearchIllegalStateException("Can't delete index store for [" + metaData.getIndex() + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]");
if (indices.containsKey(indexName)) {
String localUUid = indices.get(indexName).v1().indexUUID();
throw new ElasticsearchIllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]");
}
ClusterState clusterState = clusterService.state();
if (clusterState.metaData().hasIndex(indexName)) {
if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) {
// we do not delete the store if it is a master eligible node and the index is still in the cluster state
// because we want to keep the meta data for indices around even if no shards are left here
final IndexMetaData index = clusterState.metaData().index(indexName);
throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]");
}
@ -244,7 +244,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final IndexMetaData metaData = previousState.metaData().index(index);
assert metaData != null;
indexSettings = metaData.settings();
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData);
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
}
try {
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index, indexSettings, localNodeId);
@ -28,13 +28,13 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.monitor.jvm.JvmInfo;

@ -64,7 +64,8 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin

private volatile ScheduledFuture scheduler;

private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of(IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of(
IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);

@Inject
public IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) {
@ -19,7 +19,6 @@

package org.elasticsearch.indices.recovery;

import com.google.common.collect.Sets;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

@ -27,7 +26,6 @@ import org.elasticsearch.index.store.Store;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;
import java.util.Set;

/**
*

@ -37,15 +35,17 @@ class RecoveryCleanFilesRequest extends TransportRequest {
private long recoveryId;
private ShardId shardId;

private Store.MetadataSnapshot snapshotFiles;
private Store.MetadataSnapshot snapshotFiles;
private int totalTranslogOps = RecoveryState.Translog.UNKNOWN;

RecoveryCleanFilesRequest() {
}

RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Store.MetadataSnapshot snapshotFiles) {
RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Store.MetadataSnapshot snapshotFiles, int totalTranslogOps) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.snapshotFiles = snapshotFiles;
this.totalTranslogOps = totalTranslogOps;
}

public long recoveryId() {

@ -62,6 +62,7 @@ class RecoveryCleanFilesRequest extends TransportRequest {
recoveryId = in.readLong();
shardId = ShardId.readShardId(in);
snapshotFiles = Store.MetadataSnapshot.read(in);
totalTranslogOps = in.readVInt();
}

@Override

@ -70,9 +71,14 @@ class RecoveryCleanFilesRequest extends TransportRequest {
out.writeLong(recoveryId);
shardId.writeTo(out);
snapshotFiles.writeTo(out);
out.writeVInt(totalTranslogOps);
}

public Store.MetadataSnapshot sourceMetaSnapshot() {
return snapshotFiles;
}

public int totalTranslogOps() {
return totalTranslogOps;
}
}
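Each recovery request in this commit gains a totalTranslogOps field that must be written and read in the same position on both ends of the wire. A JDK-only sketch of that symmetric readFrom/writeTo pattern; it deliberately avoids the Elasticsearch StreamInput/StreamOutput API, and the class and field names are hypothetical:

import java.io.*;

public class TotalOpsWireSketch {
    private long recoveryId;
    private int totalTranslogOps = -1; // -1 mirrors the UNKNOWN sentinel

    void writeTo(DataOutputStream out) throws IOException {
        out.writeLong(recoveryId);
        out.writeInt(totalTranslogOps);  // new field appended after the existing ones
    }

    void readFrom(DataInputStream in) throws IOException {
        recoveryId = in.readLong();
        totalTranslogOps = in.readInt(); // read in exactly the order it was written
    }

    public static void main(String[] args) throws IOException {
        TotalOpsWireSketch original = new TotalOpsWireSketch();
        original.recoveryId = 42L;
        original.totalTranslogOps = 1234;

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes));

        TotalOpsWireSketch copy = new TotalOpsWireSketch();
        copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.recoveryId + " / " + copy.totalTranslogOps); // 42 / 1234
    }
}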
@ -42,16 +42,20 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi
private BytesReference content;
private StoreFileMetaData metaData;

private int totalTranslogOps;

RecoveryFileChunkRequest() {
}

public RecoveryFileChunkRequest(long recoveryId, ShardId shardId, StoreFileMetaData metaData, long position, BytesReference content, boolean lastChunk) {
public RecoveryFileChunkRequest(long recoveryId, ShardId shardId, StoreFileMetaData metaData, long position, BytesReference content,
boolean lastChunk, int totalTranslogOps) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.metaData = metaData;
this.position = position;
this.content = content;
this.lastChunk = lastChunk;
this.totalTranslogOps = totalTranslogOps;
}

public long recoveryId() {

@ -83,6 +87,10 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi
return content;
}

public int totalTranslogOps() {
return totalTranslogOps;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

@ -98,6 +106,7 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi
writtenBy = Lucene.parseVersionLenient(versionString, null);
metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
lastChunk = in.readBoolean();
totalTranslogOps = in.readVInt();
}

@Override

@ -112,6 +121,7 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi
out.writeBytesReference(content);
out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());
out.writeBoolean(lastChunk);
out.writeVInt(totalTranslogOps);
}

@Override
@ -41,17 +41,20 @@ class RecoveryFilesInfoRequest extends TransportRequest {
List<String> phase1ExistingFileNames;
List<Long> phase1ExistingFileSizes;

int totalTranslogOps;

RecoveryFilesInfoRequest() {
}

RecoveryFilesInfoRequest(long recoveryId, ShardId shardId, List<String> phase1FileNames, List<Long> phase1FileSizes,
List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes) {
List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes, int totalTranslogOps) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.phase1FileNames = phase1FileNames;
this.phase1FileSizes = phase1FileSizes;
this.phase1ExistingFileNames = phase1ExistingFileNames;
this.phase1ExistingFileSizes = phase1ExistingFileSizes;
this.totalTranslogOps = totalTranslogOps;
}

public long recoveryId() {

@ -90,6 +93,7 @@ class RecoveryFilesInfoRequest extends TransportRequest {
for (int i = 0; i < size; i++) {
phase1ExistingFileSizes.add(in.readVLong());
}
totalTranslogOps = in.readVInt();
}

@Override

@ -117,5 +121,6 @@ class RecoveryFilesInfoRequest extends TransportRequest {
for (Long phase1ExistingFileSize : phase1ExistingFileSizes) {
out.writeVLong(phase1ExistingFileSize);
}
out.writeVInt(totalTranslogOps);
}
}
@ -33,13 +33,15 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {

private long recoveryId;
private ShardId shardId;
private int totalTranslogOps = RecoveryState.Translog.UNKNOWN;

RecoveryPrepareForTranslogOperationsRequest() {
}

RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId) {
RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.totalTranslogOps = totalTranslogOps;
}

public long recoveryId() {

@ -50,11 +52,16 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
return shardId;
}

public int totalTranslogOps() {
return totalTranslogOps;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
recoveryId = in.readLong();
shardId = ShardId.readShardId(in);
totalTranslogOps = in.readVInt();
}

@Override

@ -62,5 +69,6 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
super.writeTo(out);
out.writeLong(recoveryId);
shardId.writeTo(out);
out.writeVInt(totalTranslogOps);
}
}
@ -199,7 +199,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
@Override
public void run() throws InterruptedException {
RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(),
response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes);
response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes,
shard.translog().estimatedNumberOfOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest,
TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

@ -288,7 +289,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
public void run() throws InterruptedException {
// Actually send the file chunk to the target node, waiting for it to complete
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK,
new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, lastChunk),
new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content,
lastChunk, shard.translog().estimatedNumberOfOperations()),
requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
});

@ -350,7 +352,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
// are deleted
try {
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata),
new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, shard.translog().estimatedNumberOfOperations()),
TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
} catch (RemoteTransportException remoteException) {

@ -427,7 +429,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
// operations. This ensures the shard engine is started and disables
// garbage collection (not the JVM's GC!) of tombstone deletes
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()),
new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), shard.translog().estimatedNumberOfOperations()),
TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
});

@ -435,7 +437,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
stopWatch.stop();
response.startTime = stopWatch.totalTime().millis();
logger.trace("{} recovery [phase2] to {}: start took [{}]",
request.shardId(), request.targetNode(), request.targetNode(), stopWatch.totalTime());
request.shardId(), request.targetNode(), stopWatch.totalTime());

logger.trace("{} recovery [phase2] to {}: updating current mapping to master", request.shardId(), request.targetNode());

@ -616,7 +618,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
cancellableThreads.execute(new Interruptable() {
@Override
public void run() throws InterruptedException {
final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations);
final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
request.recoveryId(), request.shardId(), operations, shard.translog().estimatedNumberOfOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest,
recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}

@ -633,7 +636,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
cancellableThreads.execute(new Interruptable() {
@Override
public void run() throws InterruptedException {
RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations);
RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
request.recoveryId(), request.shardId(), operations, shard.translog().estimatedNumberOfOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest,
recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
@ -39,7 +39,6 @@ import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

/**
* Keeps track of state related to shard recovery.

@ -359,6 +358,7 @@ public class RecoveryState implements ToXContent, Streamable {
static final XContentBuilderString TARGET = new XContentBuilderString("target");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
static final XContentBuilderString TRANSLOG = new XContentBuilderString("translog");
static final XContentBuilderString TOTAL_ON_START = new XContentBuilderString("total_on_start");
static final XContentBuilderString START = new XContentBuilderString("start");
static final XContentBuilderString RECOVERED = new XContentBuilderString("recovered");
static final XContentBuilderString RECOVERED_IN_BYTES = new XContentBuilderString("recovered_in_bytes");

@ -473,40 +473,90 @@ public class RecoveryState implements ToXContent, Streamable {
}

public static class Translog extends Timer implements ToXContent, Streamable {
private final AtomicInteger currentTranslogOperations = new AtomicInteger();
public static final int UNKNOWN = -1;

public void reset() {
private int recovered;
private int total = UNKNOWN;
private int totalOnStart = UNKNOWN;

public synchronized void reset() {
super.reset();
currentTranslogOperations.set(0);
recovered = 0;
total = UNKNOWN;
totalOnStart = UNKNOWN;
}

public void addTranslogOperations(int count) {
this.currentTranslogOperations.addAndGet(count);
public synchronized void incrementRecoveredOperations() {
recovered++;
assert total == UNKNOWN || total >= recovered : "total, if known, should be > recovered. total [" + total + "], recovered [" + recovered + "]";
}

public void incrementTranslogOperations() {
this.currentTranslogOperations.incrementAndGet();
/** returns the total number of translog operations recovered so far */
public synchronized int recoveredOperations() {
return recovered;
}

public int currentTranslogOperations() {
return this.currentTranslogOperations.get();
/**
* returns the total number of translog operations needed to be recovered at this moment.
* Note that this can change as the number of operations grows during recovery.
* <p/>
* A value of -1 ({@link RecoveryState.Translog#UNKNOWN} is return if this is unknown (typically a gateway recovery)
*/
public synchronized int totalOperations() {
return total;
}

public synchronized void totalOperations(int total) {
this.total = total;
assert total == UNKNOWN || total >= recovered : "total, if known, should be > recovered. total [" + total + "], recovered [" + recovered + "]";
}

/**
* returns the total number of translog operations to recovered, on the start of the recovery. Unlike {@link #totalOperations}
* this does change during recovery.
* <p/>
* A value of -1 ({@link RecoveryState.Translog#UNKNOWN} is return if this is unknown (typically a gateway recovery)
*/
public synchronized int totalOperationsOnStart() {
return this.totalOnStart;
}

public synchronized void totalOperationsOnStart(int total) {
this.totalOnStart = total;
}

public synchronized float recoveredPercent() {
if (total == UNKNOWN) {
return -1.f;
}
if (total == 0) {
return 100.f;
}
return recovered * 100.0f / total;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
currentTranslogOperations.set(in.readVInt());
recovered = in.readVInt();
total = in.readVInt();
totalOnStart = in.readVInt();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(currentTranslogOperations.get());
out.writeVInt(recovered);
out.writeVInt(total);
out.writeVInt(totalOnStart);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.RECOVERED, currentTranslogOperations.get());
public synchronized XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.RECOVERED, recovered);
builder.field(Fields.TOTAL, total);
builder.field(Fields.PERCENT, String.format(Locale.ROOT, "%1.1f%%", recoveredPercent()));
builder.field(Fields.TOTAL_ON_START, totalOnStart);
builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, time());
return builder;
}
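recoveredPercent() above maps an unknown total to -1, an empty translog to 100%, and otherwise reports recovered operations as a fraction of the total. A tiny standalone sketch with the same branches and a few worked values:

public class RecoveredPercentSketch {
    static final int UNKNOWN = -1;

    static float recoveredPercent(int recovered, int total) {
        if (total == UNKNOWN) {
            return -1.f;   // progress cannot be reported yet
        }
        if (total == 0) {
            return 100.f;  // nothing to recover counts as done
        }
        return recovered * 100.0f / total;
    }

    public static void main(String[] args) {
        System.out.println(recoveredPercent(0, UNKNOWN)); // -1.0
        System.out.println(recoveredPercent(0, 0));       // 100.0
        System.out.println(recoveredPercent(250, 1000));  // 25.0
    }
}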
@ -277,6 +277,7 @@ public class RecoveryTarget extends AbstractComponent {
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
final RecoveryStatus recoveryStatus = statusRef.status();
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
recoveryStatus.indexShard().prepareForTranslogRecovery();
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);

@ -322,9 +323,11 @@ public class RecoveryTarget extends AbstractComponent {
public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception {
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
final RecoveryStatus recoveryStatus = statusRef.status();
final RecoveryState.Translog translog = recoveryStatus.state().getTranslog();
translog.totalOperations(request.totalTranslogOps());
for (Translog.Operation operation : request.operations()) {
recoveryStatus.indexShard().performRecoveryOperation(operation);
recoveryStatus.state().getTranslog().incrementTranslogOperations();
translog.incrementRecoveredOperations();
}
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);

@ -355,6 +358,8 @@ public class RecoveryTarget extends AbstractComponent {
for (int i = 0; i < request.phase1FileNames.size(); i++) {
index.addFileDetail(request.phase1FileNames.get(i), request.phase1FileSizes.get(i), false);
}
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps);
recoveryStatus.state().getTranslog().totalOperationsOnStart(request.totalTranslogOps);
// recoveryBytesCount / recoveryFileCount will be set as we go...
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}

@ -377,6 +382,7 @@ public class RecoveryTarget extends AbstractComponent {
public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
final RecoveryStatus recoveryStatus = statusRef.status();
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
// first, we go and move files that were created with the recovery id suffix to
// the actual names, its ok if we have a corrupted index here, since we have replicas
// to recover from in case of a full cluster shutdown just when this code executes...

@ -425,6 +431,7 @@ public class RecoveryTarget extends AbstractComponent {
try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
final RecoveryStatus recoveryStatus = statusRef.status();
final Store store = recoveryStatus.store();
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
IndexOutput indexOutput;
if (request.position() == 0) {
indexOutput = recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
@ -20,12 +20,11 @@
package org.elasticsearch.indices.recovery;

import com.google.common.collect.Lists;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.TranslogStreams;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogStreams;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;

@ -39,14 +38,16 @@ class RecoveryTranslogOperationsRequest extends TransportRequest {
private long recoveryId;
private ShardId shardId;
private List<Translog.Operation> operations;
private int totalTranslogOps = RecoveryState.Translog.UNKNOWN;

RecoveryTranslogOperationsRequest() {
}

RecoveryTranslogOperationsRequest(long recoveryId, ShardId shardId, List<Translog.Operation> operations) {
RecoveryTranslogOperationsRequest(long recoveryId, ShardId shardId, List<Translog.Operation> operations, int totalTranslogOps) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.operations = operations;
this.totalTranslogOps = totalTranslogOps;
}

public long recoveryId() {

@ -61,6 +62,10 @@ class RecoveryTranslogOperationsRequest extends TransportRequest {
return operations;
}

public int totalTranslogOps() {
return totalTranslogOps;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

@ -71,6 +76,7 @@ class RecoveryTranslogOperationsRequest extends TransportRequest {
for (int i = 0; i < size; i++) {
operations.add(TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.read(in));
}
totalTranslogOps = in.readVInt();
}

@Override

@ -82,5 +88,6 @@ class RecoveryTranslogOperationsRequest extends TransportRequest {
for (Translog.Operation operation : operations) {
TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.write(out, operation);
}
out.writeVInt(totalTranslogOps);
}
}
@ -296,9 +296,18 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
IndexMetaData indexMeta = clusterState.getMetaData().indices().get(shardId.getIndex());
try {
indicesService.deleteShardStore("no longer used", shardId, indexMeta);
} catch (Exception ex) {
} catch (Throwable ex) {
logger.debug("{} failed to delete unallocated shard, ignoring", ex, shardId);
}
// if the index doesn't exists anymore, delete its store as well, but only if its a non master node, since master
// nodes keep the index metadata around
if (indicesService.hasIndex(shardId.getIndex()) == false && currentState.nodes().localNode().masterNode() == false) {
try {
indicesService.deleteIndexStore("no longer used", indexMeta, currentState);
} catch (Throwable ex) {
logger.debug("{} failed to delete unallocated index, ignoring", ex, shardId.getIndex());
}
}
return currentState;
}
@ -380,7 +380,11 @@ public class Node implements Releasable {
}

stopWatch.stop().start("script");
injector.getInstance(ScriptService.class).close();
try {
injector.getInstance(ScriptService.class).close();
} catch(IOException e) {
logger.warn("ScriptService close failed", e);
}

stopWatch.stop().start("thread_pool");
// TODO this should really use ThreadPool.terminate()
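The Node shutdown hunk wraps ScriptService.close() in try/catch so a single failing close cannot abort the remaining shutdown steps. A minimal sketch of that idea using java.io.Closeable (the variable names are illustrative):

import java.io.Closeable;
import java.io.IOException;

public class ShutdownCloseSketch {
    public static void main(String[] args) {
        Closeable scriptService = () -> { throw new IOException("simulated close failure"); };
        try {
            scriptService.close();
        } catch (IOException e) {
            // log and keep going so the remaining shutdown steps (thread pool, etc.) still run
            System.err.println("ScriptService close failed: " + e.getMessage());
        }
        System.out.println("continuing with the rest of the shutdown");
    }
}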
@ -90,6 +90,7 @@ public class PercolateContext extends SearchContext {
private final ScriptService scriptService;
private final ConcurrentMap<BytesRef, Query> percolateQueries;
private final int numberOfShards;
private final Filter aliasFilter;
private String[] types;

private Engine.Searcher docSearcher;

@ -109,7 +110,7 @@ public class PercolateContext extends SearchContext {

public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard,
IndexService indexService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, ScriptService scriptService) {
BigArrays bigArrays, ScriptService scriptService, Filter aliasFilter) {
this.indexShard = indexShard;
this.indexService = indexService;
this.fieldDataService = indexService.fieldData();

@ -123,6 +124,7 @@ public class PercolateContext extends SearchContext {
this.searcher = new ContextIndexSearcher(this, engineSearcher);
this.scriptService = scriptService;
this.numberOfShards = request.getNumberOfShards();
this.aliasFilter = aliasFilter;
}

public IndexSearcher docSearcher() {

@ -277,7 +279,7 @@ public class PercolateContext extends SearchContext {

@Override
public Filter searchFilter(String[] types) {
throw new UnsupportedOperationException();
return aliasFilter();
}

@Override

@ -509,7 +511,7 @@ public class PercolateContext extends SearchContext {

@Override
public Filter aliasFilter() {
throw new UnsupportedOperationException();
return aliasFilter;
}

@Override
@ -23,13 +23,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.memory.ExtendedMemoryIndex;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.ElasticsearchException;

@ -48,6 +42,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.XBooleanFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.BytesText;
import org.elasticsearch.common.text.StringText;

@ -63,22 +58,14 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.percolator.stats.ShardPercolateService;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.percolator.QueryCollector.Count;
import org.elasticsearch.percolator.QueryCollector.Match;
import org.elasticsearch.percolator.QueryCollector.MatchAndScore;
import org.elasticsearch.percolator.QueryCollector.MatchAndSort;
import org.elasticsearch.percolator.QueryCollector.*;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchShardTarget;

@ -96,9 +83,7 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.SourceToParse.source;
import static org.elasticsearch.percolator.QueryCollector.count;
import static org.elasticsearch.percolator.QueryCollector.match;
import static org.elasticsearch.percolator.QueryCollector.matchAndScore;
import static org.elasticsearch.percolator.QueryCollector.*;

public class PercolatorService extends AbstractComponent {

@ -174,9 +159,15 @@ public class PercolatorService extends AbstractComponent {
shardPercolateService.prePercolate();
long startTime = System.nanoTime();

String[] filteringAliases = clusterService.state().getMetaData().filteringAliases(
indexShard.shardId().index().name(),
request.indices()
);
Filter aliasFilter = percolateIndexService.aliasesService().aliasFilter(filteringAliases);

SearchShardTarget searchShardTarget = new SearchShardTarget(clusterService.localNode().id(), request.shardId().getIndex(), request.shardId().id());
final PercolateContext context = new PercolateContext(
request, searchShardTarget, indexShard, percolateIndexService, pageCacheRecycler, bigArrays, scriptService
request, searchShardTarget, indexShard, percolateIndexService, pageCacheRecycler, bigArrays, scriptService, aliasFilter
);
try {
ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context);

@ -190,7 +181,7 @@ public class PercolatorService extends AbstractComponent {
throw new ElasticsearchIllegalArgumentException("Nothing to percolate");
}

if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null)) {
if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null) || context.aliasFilter() != null) {
context.percolateQuery(new MatchAllDocsQuery());
}

@ -779,8 +770,19 @@ public class PercolatorService extends AbstractComponent {

private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException {
Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.indexService().queryParserService().autoFilterCachePolicy());
FilteredQuery query = new FilteredQuery(context.percolateQuery(), percolatorTypeFilter);
percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.queryParserService().autoFilterCachePolicy());

final Filter filter;
if (context.aliasFilter() != null) {
XBooleanFilter booleanFilter = new XBooleanFilter();
booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST);
booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST);
filter = booleanFilter;
} else {
filter = percolatorTypeFilter;
}

FilteredQuery query = new FilteredQuery(context.percolateQuery(), filter);
percolatorSearcher.searcher().search(query, percolateCollector);
percolateCollector.aggregatorCollector.postCollection();
if (context.aggregations() != null) {
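queryBasedPercolating now ANDs the alias filter with the percolator type filter when an alias filter is present, and falls back to the type filter alone otherwise. A stand-in sketch of that selection logic using java.util.function.Predicate instead of Lucene filters (document strings and filter conditions are hypothetical):

import java.util.function.Predicate;

public class CombinedFilterSketch {
    public static void main(String[] args) {
        Predicate<String> percolatorTypeFilter = doc -> doc.startsWith("percolator:");
        Predicate<String> aliasFilter = doc -> doc.contains("tenant-a");

        // Both conditions must match (two MUST clauses) when an alias filter exists.
        boolean hasAliasFilter = true;
        Predicate<String> filter = hasAliasFilter
                ? percolatorTypeFilter.and(aliasFilter)
                : percolatorTypeFilter;

        System.out.println(filter.test("percolator:tenant-a:q1")); // true
        System.out.println(filter.test("percolator:tenant-b:q2")); // false
    }
}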
@ -35,10 +35,32 @@ import org.elasticsearch.common.Table;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.stats.PercolateStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.suggest.stats.SuggestStats;
import org.elasticsearch.indices.NodeIndicesStats;
import org.elasticsearch.monitor.fs.FsStats;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.os.OsStats;
import org.elasticsearch.monitor.process.ProcessInfo;
import org.elasticsearch.monitor.process.ProcessStats;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestActionListener;
import org.elasticsearch.rest.action.support.RestResponseListener;
import org.elasticsearch.rest.action.support.RestTable;
import org.elasticsearch.search.suggest.completion.CompletionStats;

import java.util.Locale;

@ -200,6 +222,16 @@ public class RestNodesAction extends AbstractCatAction {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeStats stats = nodesStats.getNodesMap().get(node.id());

JvmInfo jvmInfo = info == null ? null : info.getJvm();
OsInfo osInfo = info == null ? null : info.getOs();
ProcessInfo processInfo = info == null ? null : info.getProcess();

JvmStats jvmStats = stats == null ? null : stats.getJvm();
FsStats fsStats = stats == null ? null : stats.getFs();
OsStats osStats = stats == null ? null : stats.getOs();
ProcessStats processStats = stats == null ? null : stats.getProcess();
NodeIndicesStats indicesStats = stats == null ? null : stats.getIndices();

table.startRow();

table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
@ -214,93 +246,107 @@ public class RestNodesAction extends AbstractCatAction {

table.addCell(node.getVersion().number());
table.addCell(info == null ? null : info.getBuild().hashShort());
table.addCell(info == null ? null : info.getJvm().version());
table.addCell(stats == null ? null : stats.getFs() == null ? null : stats.getFs().total().getAvailable());
table.addCell(stats == null ? null : stats.getJvm().getMem().getHeapUsed());
table.addCell(stats == null ? null : stats.getJvm().getMem().getHeapUsedPrecent());
table.addCell(info == null ? null : info.getJvm().getMem().getHeapMax());
table.addCell(stats == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().used());
table.addCell(stats == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().usedPercent());
table.addCell(info == null ? null : info.getOs().mem() == null ? null : info.getOs().mem().total()); // sigar fails to load in IntelliJ
table.addCell(stats == null ? null : stats.getProcess().getOpenFileDescriptors());
table.addCell(stats == null || info == null ? null :
calculatePercentage(stats.getProcess().getOpenFileDescriptors(), info.getProcess().getMaxFileDescriptors()));
table.addCell(info == null ? null : info.getProcess().getMaxFileDescriptors());
table.addCell(jvmInfo == null ? null : jvmInfo.version());
table.addCell(fsStats == null ? null : fsStats.getTotal().getAvailable());
table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed());
table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPrecent());
table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax());
table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().used());
table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().usedPercent());
table.addCell(osInfo == null ? null : osInfo.getMem() == null ? null : osInfo.getMem().total()); // sigar fails to load in IntelliJ
table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors());
table.addCell(processStats == null || processInfo == null ? null :
calculatePercentage(processStats.getOpenFileDescriptors(), processInfo.getMaxFileDescriptors()));
table.addCell(processInfo == null ? null : processInfo.getMaxFileDescriptors());

table.addCell(stats == null ? null : stats.getOs() == null ? null : stats.getOs().getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, "%.2f", stats.getOs().getLoadAverage()[0]));
table.addCell(stats == null ? null : stats.getJvm().uptime());
table.addCell(osStats == null ? null : osStats.getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getLoadAverage()[0]));
table.addCell(jvmStats == null ? null : jvmStats.uptime());
table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-");
table.addCell(masterId == null ? "x" : masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-");
table.addCell(node.name());

table.addCell(stats == null ? null : stats.getIndices().getCompletion().getSize());
CompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion();
table.addCell(completionStats == null ? null : completionStats.getSize());

table.addCell(stats == null ? null : stats.getIndices().getFieldData().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getFieldData().getEvictions());
FieldDataStats fdStats = indicesStats == null ? null : stats.getIndices().getFieldData();
table.addCell(fdStats == null ? null : fdStats.getMemorySize());
table.addCell(fdStats == null ? null : fdStats.getEvictions());

table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getEvictions());
FilterCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache();
table.addCell(fcStats == null ? null : fcStats.getMemorySize());
table.addCell(fcStats == null ? null : fcStats.getEvictions());

table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getEvictions());
table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getHitCount());
table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getMissCount());
QueryCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache();
table.addCell(qcStats == null ? null : qcStats.getMemorySize());
table.addCell(qcStats == null ? null : qcStats.getEvictions());
table.addCell(qcStats == null ? null : qcStats.getHitCount());
table.addCell(qcStats == null ? null : qcStats.getMissCount());

table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotalTime());
FlushStats flushStats = indicesStats == null ? null : indicesStats.getFlush();
table.addCell(flushStats == null ? null : flushStats.getTotal());
table.addCell(flushStats == null ? null : flushStats.getTotalTime());

table.addCell(stats == null ? null : stats.getIndices().getGet().current());
table.addCell(stats == null ? null : stats.getIndices().getGet().getTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().getCount());
table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsCount());
table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingCount());
GetStats getStats = indicesStats == null ? null : indicesStats.getGet();
table.addCell(getStats == null ? null : getStats.current());
table.addCell(getStats == null ? null : getStats.getTime());
table.addCell(getStats == null ? null : getStats.getCount());
table.addCell(getStats == null ? null : getStats.getExistsTime());
table.addCell(getStats == null ? null : getStats.getExistsCount());
table.addCell(getStats == null ? null : getStats.getMissingTime());
table.addCell(getStats == null ? null : getStats.getMissingCount());

table.addCell(stats == null ? null : stats.getIndices().getIdCache().getMemorySize());
IdCacheStats idCacheStats = indicesStats == null ? null : indicesStats.getIdCache();
table.addCell(idCacheStats == null ? null : idCacheStats.getMemorySize());

table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCurrent());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteTime());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCount());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCurrent());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexTime());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCount());
IndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing();
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent());
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime());
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCount());
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCurrent());
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexTime());
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCount());

table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrent());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentNumDocs());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentSize());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalNumDocs());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalSize());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalTime());
MergeStats mergeStats = indicesStats == null ? null : indicesStats.getMerge();
table.addCell(mergeStats == null ? null : mergeStats.getCurrent());
table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs());
table.addCell(mergeStats == null ? null : mergeStats.getCurrentSize());
table.addCell(mergeStats == null ? null : mergeStats.getTotal());
table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs());
table.addCell(mergeStats == null ? null : mergeStats.getTotalSize());
table.addCell(mergeStats == null ? null : mergeStats.getTotalTime());

table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCurrent());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getNumQueries());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getTime());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCount());
PercolateStats percolateStats = indicesStats == null ? null : indicesStats.getPercolate();
table.addCell(percolateStats == null ? null : percolateStats.getCurrent());
table.addCell(percolateStats == null ? null : percolateStats.getMemorySize());
table.addCell(percolateStats == null ? null : percolateStats.getNumQueries());
table.addCell(percolateStats == null ? null : percolateStats.getTime());
table.addCell(percolateStats == null ? null : percolateStats.getCount());

table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotalTime());
RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh();
table.addCell(refreshStats == null ? null : refreshStats.getTotal());
table.addCell(refreshStats == null ? null : refreshStats.getTotalTime());

table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCurrent());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchTime());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCount());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getOpenContexts());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCurrent());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryTime());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCount());
SearchStats searchStats = indicesStats == null ? null : indicesStats.getSearch();
table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCurrent());
table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchTime());
table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCount());
table.addCell(searchStats == null ? null : searchStats.getOpenContexts());
table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent());
table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime());
table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount());

table.addCell(stats == null ? null : stats.getIndices().getSegments().getCount());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getIndexWriterMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getIndexWriterMaxMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getVersionMapMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getBitsetMemory());
SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments();
table.addCell(segmentsStats == null ? null : segmentsStats.getCount());
table.addCell(segmentsStats == null ? null : segmentsStats.getMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMaxMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory());
table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory());

table.addCell(stats == null ? null : stats.getIndices().getSuggest().getCurrent());
table.addCell(stats == null ? null : stats.getIndices().getSuggest().getTime());
table.addCell(stats == null ? null : stats.getIndices().getSuggest().getCount());
SuggestStats suggestStats = indicesStats == null ? null : indicesStats.getSuggest();
table.addCell(suggestStats == null ? null : suggestStats.getCurrent());
table.addCell(suggestStats == null ? null : suggestStats.getTime());
table.addCell(suggestStats == null ? null : suggestStats.getCount());

table.endRow();
}
@ -98,6 +98,9 @@ public class RestRecoveryAction extends AbstractCatAction {
.addCell("bytes_percent", "alias:bp;desc:percent of bytes recovered")
.addCell("total_files", "alias:tf;desc:total number of files")
.addCell("total_bytes", "alias:tb;desc:total number of bytes")
.addCell("translog", "alias:tr;desc:translog operations recovered")
.addCell("translog_percent", "alias:trp;desc:percent of translog recovery")
.addCell("total_translog", "alias:trt;desc:current total translog operations")
.endHeaders();
return t;
}
@ -156,6 +159,9 @@ public class RestRecoveryAction extends AbstractCatAction {
t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent()));
t.addCell(state.getIndex().totalFileCount());
t.addCell(state.getIndex().totalBytes());
t.addCell(state.getTranslog().recoveredOperations());
t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getTranslog().recoveredPercent()));
t.addCell(state.getTranslog().totalOperations());
t.endRow();
}
}
@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.Table;
@ -58,7 +59,7 @@ public class RestShardsAction extends AbstractCatAction {
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices);
clusterStateRequest.clear().nodes(true).metaData(true).routingTable(true).indices(indices);
client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {
@Override
public void processResponse(final ClusterStateResponse clusterStateResponse) {
@ -165,7 +166,21 @@ public class RestShardsAction extends AbstractCatAction {

table.addCell(shard.index());
table.addCell(shard.id());
table.addCell(shard.primary() ? "p" : "r");

IndexMetaData indexMeta = state.getState().getMetaData().index(shard.index());
boolean usesShadowReplicas = false;
if (indexMeta != null) {
usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.settings());
}
if (shard.primary()) {
table.addCell("p");
} else {
if (usesShadowReplicas) {
table.addCell("s");
} else {
table.addCell("r");
}
}
table.addCell(shard.state());
table.addCell(shardStats == null ? null : shardStats.getDocs().getCount());
table.addCell(shardStats == null ? null : shardStats.getStore().getSize());
@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;

import static org.elasticsearch.rest.RestRequest.Method.DELETE;

@ -37,6 +38,6 @@ public class RestDeleteSearchTemplateAction extends RestDeleteIndexedScriptActio

@Override
protected String getScriptLang(RestRequest request) {
return "mustache";
return MustacheScriptEngineService.NAME;
}
}
@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;

import static org.elasticsearch.rest.RestRequest.Method.GET;

@ -40,7 +41,7 @@ public class RestGetSearchTemplateAction extends RestGetIndexedScriptAction {

@Override
protected String getScriptLang(RestRequest request) {
return "mustache";
return MustacheScriptEngineService.NAME;
}

@Override
@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;

import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
@ -58,6 +59,6 @@ public class RestPutSearchTemplateAction extends RestPutIndexedScriptAction {

@Override
protected String getScriptLang(RestRequest request) {
return "mustache";
return MustacheScriptEngineService.NAME;
}
}
@ -34,6 +34,8 @@ import java.util.Map;
*/
public class NativeScriptEngineService extends AbstractComponent implements ScriptEngineService {

public static final String NAME = "native";

private final ImmutableMap<String, NativeScriptFactory> scripts;

@Inject
@ -44,7 +46,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri

@Override
public String[] types() {
return new String[]{"native"};
return new String[]{NAME};
}

@Override
@ -22,12 +22,13 @@ package org.elasticsearch.script;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.lookup.SearchLookup;

import java.io.Closeable;
import java.util.Map;

/**
*
*/
public interface ScriptEngineService {
public interface ScriptEngineService extends Closeable {

String[] types();

@ -45,8 +46,6 @@ public interface ScriptEngineService {

Object unwrap(Object value);

void close();

/**
* Handler method called when a script is removed from the Guava cache.
*
@ -25,7 +25,7 @@ import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.ImmutableMap;

import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.action.ActionListener;
@ -60,11 +60,13 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.script.groovy.GroovyScriptEngineService;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.watcher.FileChangesListener;
import org.elasticsearch.watcher.FileWatcher;
import org.elasticsearch.watcher.ResourceWatcherService;

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Files;
@ -78,7 +80,7 @@ import java.util.concurrent.TimeUnit;
/**
*
*/
public class ScriptService extends AbstractComponent {
public class ScriptService extends AbstractComponent implements Closeable {

public static final String DEFAULT_SCRIPTING_LANGUAGE_SETTING = "script.default_lang";
public static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic";
@ -91,9 +93,11 @@ public class ScriptService extends AbstractComponent {

private final String defaultLang;

private final ImmutableMap<String, ScriptEngineService> scriptEngines;
private final Set<ScriptEngineService> scriptEngines;
private final ImmutableMap<String, ScriptEngineService> scriptEnginesByLang;
private final ImmutableMap<String, ScriptEngineService> scriptEnginesByExt;

private final ConcurrentMap<String, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap();
private final ConcurrentMap<CacheKey, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap();

private final Cache<CacheKey, CompiledScript> cache;
private final Path scriptsDirectory;
@ -139,99 +143,12 @@ public class ScriptService extends AbstractComponent {
public static final ParseField SCRIPT_ID = new ParseField("script_id");
public static final ParseField SCRIPT_INLINE = new ParseField("script");

public static enum ScriptType {

INLINE,
INDEXED,
FILE;

private static final int INLINE_VAL = 0;
private static final int INDEXED_VAL = 1;
private static final int FILE_VAL = 2;

public static ScriptType readFrom(StreamInput in) throws IOException {
int scriptTypeVal = in.readVInt();
switch (scriptTypeVal) {
case INDEXED_VAL:
return INDEXED;
case INLINE_VAL:
return INLINE;
case FILE_VAL:
return FILE;
default:
throw new ElasticsearchIllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal +
"] expected one of ["+INLINE_VAL +"," + INDEXED_VAL + "," + FILE_VAL+"]");
}
}

public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{
if (scriptType != null) {
switch (scriptType){
case INDEXED:
out.writeVInt(INDEXED_VAL);
return;
case INLINE:
out.writeVInt(INLINE_VAL);
return;
case FILE:
out.writeVInt(FILE_VAL);
return;
default:
throw new ElasticsearchIllegalStateException("Unknown ScriptType " + scriptType);
}
} else {
out.writeVInt(INLINE_VAL); //Default to inline
}
}
}

static class IndexedScript {
private final String lang;
private final String id;

IndexedScript(String lang, String script) {
this.lang = lang;
final String[] parts = script.split("/");
if (parts.length == 1) {
this.id = script;
} else {
if (parts.length != 3) {
throw new ElasticsearchIllegalArgumentException("Illegal index script format [" + script + "]" +
" should be /lang/id");
} else {
if (!parts[1].equals(this.lang)) {
throw new ElasticsearchIllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]");
}
this.id = parts[2];
}
}
}
}

class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
GroovyScriptEngineService engine = (GroovyScriptEngineService) ScriptService.this.scriptEngines.get("groovy");
if (engine != null) {
String[] patches = settings.getAsArray(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY);
boolean blacklistChanged = engine.addToBlacklist(patches);
if (blacklistChanged) {
logger.info("adding {} to [{}], new blacklisted methods: {}", patches,
GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, engine.blacklistAdditions());
engine.reloadConfig();
// Because the GroovyScriptEngineService knows nothing about the
// cache, we need to clear it here if the setting changes
ScriptService.this.clearCache();
}
}
}
}

@Inject
public ScriptService(Settings settings, Environment env, Set<ScriptEngineService> scriptEngines,
ResourceWatcherService resourceWatcherService, NodeSettingsService nodeSettingsService) throws IOException {
super(settings);

this.scriptEngines = scriptEngines;
int cacheMaxSize = settings.getAsInt(SCRIPT_CACHE_SIZE_SETTING, 100);
TimeValue cacheExpire = settings.getAsTime(SCRIPT_CACHE_EXPIRE_SETTING, null);
logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire);
@ -249,13 +166,18 @@ public class ScriptService extends AbstractComponent {
cacheBuilder.removalListener(new ScriptCacheRemovalListener());
this.cache = cacheBuilder.build();

ImmutableMap.Builder<String, ScriptEngineService> builder = ImmutableMap.builder();
ImmutableMap.Builder<String, ScriptEngineService> enginesByLangBuilder = ImmutableMap.builder();
ImmutableMap.Builder<String, ScriptEngineService> enginesByExtBuilder = ImmutableMap.builder();
for (ScriptEngineService scriptEngine : scriptEngines) {
for (String type : scriptEngine.types()) {
builder.put(type, scriptEngine);
enginesByLangBuilder.put(type, scriptEngine);
}
for (String ext : scriptEngine.extensions()) {
enginesByExtBuilder.put(ext, scriptEngine);
}
}
this.scriptEngines = builder.build();
this.scriptEnginesByLang = enginesByLangBuilder.build();
this.scriptEnginesByExt = enginesByExtBuilder.build();

// add file watcher for static scripts
scriptsDirectory = env.configFile().resolve("scripts");
@ -281,18 +203,9 @@ public class ScriptService extends AbstractComponent {
this.client = client;
}

public void close() {
for (ScriptEngineService engineService : scriptEngines.values()) {
engineService.close();
}
}

public CompiledScript compile(String script) {
return compile(defaultLang, script);
}

public CompiledScript compile(String lang, String script) {
return compile(lang, script, ScriptType.INLINE);
@Override
public void close() throws IOException {
IOUtils.close(scriptEngines);
}

/**
@ -310,77 +223,68 @@ public class ScriptService extends AbstractComponent {
this.fileWatcher.clearState();
}

private ScriptEngineService getScriptEngineServiceForLang(String lang) {
ScriptEngineService scriptEngineService = scriptEnginesByLang.get(lang);
if (scriptEngineService == null) {
throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]");
}
return scriptEngineService;
}

private ScriptEngineService getScriptEngineServiceForFileExt(String fileExtension) {
ScriptEngineService scriptEngineService = scriptEnginesByExt.get(fileExtension);
if (scriptEngineService == null) {
throw new ElasticsearchIllegalArgumentException("script file extension not supported [" + fileExtension + "]");
}
return scriptEngineService;
}

/**
* Compiles a script straight-away, or returns the previously compiled and cached script, without checking if it can be executed based on settings.
*/
public CompiledScript compile(String lang, String script, ScriptType scriptType) {
if (lang == null) {
lang = defaultLang;
}
if (logger.isTraceEnabled()) {
logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, scriptType, script);
}

CacheKey cacheKey;
CompiledScript compiled;
ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang);
CacheKey cacheKey = newCacheKey(scriptEngineService, script);

if (lang == null) {
lang = defaultLang;
}

if(scriptType == ScriptType.INDEXED) {
if (client == null) {
throw new ElasticsearchIllegalArgumentException("Got an indexed script with no Client registered.");
}

final IndexedScript indexedScript = new IndexedScript(lang, script);

verifyDynamicScripting(indexedScript.lang); //Since anyone can index a script, disable indexed scripting
// if dynamic scripting is disabled, perhaps its own setting ?

script = getScriptFromIndex(client, indexedScript.lang, indexedScript.id);
} else if (scriptType == ScriptType.FILE) {

compiled = staticCache.get(script); //On disk scripts will be loaded into the staticCache by the listener

if (compiled != null) {
return compiled;
} else {
if (scriptType == ScriptType.FILE) {
CompiledScript compiled = staticCache.get(cacheKey); //On disk scripts will be loaded into the staticCache by the listener
if (compiled == null) {
throw new ElasticsearchIllegalArgumentException("Unable to find on disk script " + script);
}
}

//This is an inline script check to see if we have it in the cache
verifyDynamicScripting(lang);

cacheKey = new CacheKey(lang, script);

compiled = cache.getIfPresent(cacheKey);
if (compiled != null) {
return compiled;
}

//Either an un-cached inline script or an indexed script
verifyDynamicScripting(lang, scriptEngineService);

if (!dynamicScriptEnabled(lang)) {
throw new ScriptException("dynamic scripting for [" + lang + "] disabled");
if (scriptType == ScriptType.INDEXED) {
if (client == null) {
throw new ElasticsearchIllegalArgumentException("Got an indexed script with no Client registered.");
}
final IndexedScript indexedScript = new IndexedScript(lang, script);
script = getScriptFromIndex(client, indexedScript.lang, indexedScript.id);
}

// not the end of the world if we compile it twice...
compiled = getCompiledScript(lang, script);
//Since the cache key is the script content itself we don't need to
//invalidate/check the cache if an indexed script changes.
cache.put(cacheKey, compiled);

CompiledScript compiled = cache.getIfPresent(cacheKey);
if (compiled == null) {
//Either an un-cached inline script or an indexed script
// not the end of the world if we compile it twice...
compiled = new CompiledScript(lang, scriptEngineService.compile(script));
//Since the cache key is the script content itself we don't need to
//invalidate/check the cache if an indexed script changes.
cache.put(cacheKey, compiled);
}
return compiled;
}

private CompiledScript getCompiledScript(String lang, String script) {
CompiledScript compiled;ScriptEngineService service = scriptEngines.get(lang);
if (service == null) {
throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]");
}

compiled = new CompiledScript(lang, service.compile(script));
return compiled;
}

private void verifyDynamicScripting(String lang) {
if (!dynamicScriptEnabled(lang)) {
private void verifyDynamicScripting(String lang, ScriptEngineService scriptEngineService) {
if (!dynamicScriptEnabled(lang, scriptEngineService)) {
throw new ScriptException("dynamic scripting for [" + lang + "] disabled");
}
}
@ -396,8 +300,8 @@ public class ScriptService extends AbstractComponent {
private String validateScriptLanguage(String scriptLang) {
if (scriptLang == null) {
scriptLang = defaultLang;
} else if (!scriptEngines.containsKey(scriptLang)) {
throw new ElasticsearchIllegalArgumentException("script_lang not supported ["+scriptLang+"]");
} else if (scriptEnginesByLang.containsKey(scriptLang) == false) {
throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + scriptLang + "]");
}
return scriptLang;
}
@ -421,7 +325,7 @@ public class ScriptService extends AbstractComponent {
//Just try and compile it
//This will have the benefit of also adding the script to the cache if it compiles
try {
CompiledScript compiledScript = compile(scriptLang, context.template(), ScriptService.ScriptType.INLINE);
CompiledScript compiledScript = compile(scriptLang, context.template(), ScriptType.INLINE);
if (compiledScript == null) {
throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() +
"] lang [" + scriptLang + "] (ScriptService.compile returned null)");
@ -456,6 +360,7 @@ public class ScriptService extends AbstractComponent {
client.delete(deleteRequest, listener);
}

@SuppressWarnings("unchecked")
public static String getScriptFromResponse(GetResponse responseFields) {
Map<String, Object> source = responseFields.getSourceAsMap();
if (source.containsKey("template")) {
@ -484,37 +389,39 @@ public class ScriptService extends AbstractComponent {
}
}

public ExecutableScript executable(String lang, String script, ScriptType scriptType, Map vars) {
/**
* Compiles (or retrieves from cache) and executes the provided script
*/
public ExecutableScript executable(String lang, String script, ScriptType scriptType, Map<String, Object> vars) {
return executable(compile(lang, script, scriptType), vars);
}

public ExecutableScript executable(CompiledScript compiledScript, Map vars) {
return scriptEngines.get(compiledScript.lang()).executable(compiledScript.compiled(), vars);
}

public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
return scriptEngines.get(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars);
/**
* Executes a previously compiled script provided as an argument
*/
public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> vars) {
return getScriptEngineServiceForLang(compiledScript.lang()).executable(compiledScript.compiled(), vars);
}

/**
* Compiles (or retrieves from cache) and executes the provided search script
*/
public SearchScript search(SearchLookup lookup, String lang, String script, ScriptType scriptType, @Nullable Map<String, Object> vars) {
return search(compile(lang, script, scriptType), lookup, vars);
CompiledScript compiledScript = compile(lang, script, scriptType);
return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars);
}

private boolean dynamicScriptEnabled(String lang) {
ScriptEngineService service = scriptEngines.get(lang);
if (service == null) {
throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]");
}

private boolean dynamicScriptEnabled(String lang, ScriptEngineService scriptEngineService) {
// Templating languages (mustache) and native scripts are always
// allowed, "native" executions are registered through plugins
if (this.dynamicScriptingDisabled == DynamicScriptDisabling.EVERYTHING_ALLOWED || "native".equals(lang) || "mustache".equals(lang)) {
if (this.dynamicScriptingDisabled == DynamicScriptDisabling.EVERYTHING_ALLOWED ||
NativeScriptEngineService.NAME.equals(lang) || MustacheScriptEngineService.NAME.equals(lang)) {
return true;
} else if (this.dynamicScriptingDisabled == DynamicScriptDisabling.ONLY_DISK_ALLOWED) {
return false;
} else {
return service.sandboxed();
}
if (this.dynamicScriptingDisabled == DynamicScriptDisabling.ONLY_DISK_ALLOWED) {
return false;
}
return scriptEngineService.sandboxed();
}

/**
@ -529,7 +436,7 @@ public class ScriptService extends AbstractComponent {
if (logger.isDebugEnabled()) {
logger.debug("notifying script services of script removal due to: [{}]", notification.getCause());
}
for (ScriptEngineService service : scriptEngines.values()) {
for (ScriptEngineService service : scriptEngines) {
try {
service.scriptRemoved(notification.getValue());
} catch (Exception e) {
@ -562,27 +469,20 @@ public class ScriptService extends AbstractComponent {
}
Tuple<String, String> scriptNameExt = scriptNameExt(file);
if (scriptNameExt != null) {
boolean found = false;
for (ScriptEngineService engineService : scriptEngines.values()) {
for (String s : engineService.extensions()) {
if (s.equals(scriptNameExt.v2())) {
found = true;
try {
logger.info("compiling script file [{}]", file.toAbsolutePath());
String script = Streams.copyToString(new InputStreamReader(Files.newInputStream(file), Charsets.UTF_8));
staticCache.put(scriptNameExt.v1(), new CompiledScript(engineService.types()[0], engineService.compile(script)));
} catch (Throwable e) {
logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
}
break;
}
}
if (found) {
break;
}
}
if (!found) {
ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
if (engineService == null) {
logger.warn("no script engine found for [{}]", scriptNameExt.v2());
} else {
try {
logger.info("compiling script file [{}]", file.toAbsolutePath());
try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), Charsets.UTF_8)) {
String script = Streams.copyToString(reader);
CacheKey cacheKey = newCacheKey(engineService, scriptNameExt.v1());
staticCache.put(cacheKey, new CompiledScript(engineService.types()[0], engineService.compile(script)));
}
} catch (Throwable e) {
logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
}
}
}
}
@ -596,8 +496,10 @@ public class ScriptService extends AbstractComponent {
public void onFileDeleted(Path file) {
Tuple<String, String> scriptNameExt = scriptNameExt(file);
if (scriptNameExt != null) {
ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
assert engineService != null;
logger.info("removing script file [{}]", file.toAbsolutePath());
staticCache.remove(scriptNameExt.v1());
staticCache.remove(newCacheKey(engineService, scriptNameExt.v1()));
}
}
@ -608,7 +510,63 @@ public class ScriptService extends AbstractComponent {

}

public final static class CacheKey {
/**
* The type of a script, more specifically where it gets loaded from:
* - provided dynamically at request time
* - loaded from an index
* - loaded from file
*/
public static enum ScriptType {

INLINE,
INDEXED,
FILE;

private static final int INLINE_VAL = 0;
private static final int INDEXED_VAL = 1;
private static final int FILE_VAL = 2;

public static ScriptType readFrom(StreamInput in) throws IOException {
int scriptTypeVal = in.readVInt();
switch (scriptTypeVal) {
case INDEXED_VAL:
return INDEXED;
case INLINE_VAL:
return INLINE;
case FILE_VAL:
return FILE;
default:
throw new ElasticsearchIllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal +
"] expected one of [" + INLINE_VAL + "," + INDEXED_VAL + "," + FILE_VAL + "]");
}
}

public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{
if (scriptType != null) {
switch (scriptType){
case INDEXED:
out.writeVInt(INDEXED_VAL);
return;
case INLINE:
out.writeVInt(INLINE_VAL);
return;
case FILE:
out.writeVInt(FILE_VAL);
return;
default:
throw new ElasticsearchIllegalStateException("Unknown ScriptType " + scriptType);
}
} else {
out.writeVInt(INLINE_VAL); //Default to inline
}
}
}

private static CacheKey newCacheKey(ScriptEngineService engineService, String script) {
return new CacheKey(engineService.types()[0], script);
}

private static class CacheKey {
public final String lang;
public final String script;

@ -631,4 +589,46 @@ public class ScriptService extends AbstractComponent {
return lang.hashCode() + 31 * script.hashCode();
}
}

private static class IndexedScript {
private final String lang;
private final String id;

IndexedScript(String lang, String script) {
this.lang = lang;
final String[] parts = script.split("/");
if (parts.length == 1) {
this.id = script;
} else {
if (parts.length != 3) {
throw new ElasticsearchIllegalArgumentException("Illegal index script format [" + script + "]" +
" should be /lang/id");
} else {
if (!parts[1].equals(this.lang)) {
throw new ElasticsearchIllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]");
}
this.id = parts[2];
}
}
}
}

private class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
GroovyScriptEngineService engine = (GroovyScriptEngineService) ScriptService.this.scriptEnginesByLang.get(GroovyScriptEngineService.NAME);
if (engine != null) {
String[] patches = settings.getAsArray(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY);
boolean blacklistChanged = engine.addToBlacklist(patches);
if (blacklistChanged) {
logger.info("adding {} to [{}], new blacklisted methods: {}", patches,
GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, engine.blacklistAdditions());
engine.reloadConfig();
// Because the GroovyScriptEngineService knows nothing about the
// cache, we need to clear it here if the setting changes
ScriptService.this.clearCache();
}
}
}
}
}
@ -20,8 +20,6 @@ package org.elasticsearch.script;

import org.elasticsearch.common.lucene.ReaderContextAware;
import org.elasticsearch.common.lucene.ScorerAware;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SearchLookup;

import java.util.Map;

@ -39,36 +37,4 @@ public interface SearchScript extends ExecutableScript, ReaderContextAware, Scor
long runAsLong();

double runAsDouble();

public static class Builder {

private String script;
private ScriptService.ScriptType scriptType;
private String lang;
private Map<String, Object> params;

public Builder script(String script, ScriptService.ScriptType scriptType) {
this.script = script;
this.scriptType = scriptType;
return this;
}

public Builder lang(String lang) {
this.lang = lang;
return this;
}

public Builder params(Map<String, Object> params) {
this.params = params;
return this;
}

public SearchScript build(SearchContext context) {
return build(context.scriptService(), context.lookup());
}

public SearchScript build(ScriptService service, SearchLookup lookup) {
return service.search(lookup, lang, script, scriptType, params);
}
}
}
@ -48,6 +48,8 @@ import java.util.Map;
*/
public class ExpressionScriptEngineService extends AbstractComponent implements ScriptEngineService {

public static final String NAME = "expression";

@Inject
public ExpressionScriptEngineService(Settings settings) {
super(settings);
@ -55,12 +57,12 @@ public class ExpressionScriptEngineService extends AbstractComponent implements

@Override
public String[] types() {
return new String[]{"expression"};
return new String[]{NAME};
}

@Override
public String[] extensions() {
return new String[]{"expression"};
return new String[]{NAME};
}

@Override
@ -48,10 +48,7 @@ import org.elasticsearch.search.lookup.SearchLookup;

import java.io.IOException;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;

/**
@ -59,6 +56,7 @@ import java.util.concurrent.atomic.AtomicLong;
*/
public class GroovyScriptEngineService extends AbstractComponent implements ScriptEngineService {

public static final String NAME = "groovy";
public static String GROOVY_SCRIPT_SANDBOX_ENABLED = "script.groovy.sandbox.enabled";
public static String GROOVY_SCRIPT_BLACKLIST_PATCH = "script.groovy.sandbox.method_blacklist_patch";

@ -85,9 +83,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
*/
public boolean addToBlacklist(String... additions) {
Set<String> newBlackList = new HashSet<>(blacklistAdditions);
for (String addition : additions) {
newBlackList.add(addition);
}
Collections.addAll(newBlackList, additions);
boolean changed = this.blacklistAdditions.equals(newBlackList) == false;
this.blacklistAdditions = ImmutableSet.copyOf(newBlackList);
return changed;
@ -120,7 +116,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
@Override
public void scriptRemoved(@Nullable CompiledScript script) {
// script could be null, meaning the script has already been garbage collected
if (script == null || "groovy".equals(script.lang())) {
if (script == null || NAME.equals(script.lang())) {
// Clear the cache, this removes old script versions from the
// cache to prevent running out of PermGen space
loader.clearCache();
@ -129,12 +125,12 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri

@Override
public String[] types() {
return new String[]{"groovy"};
return new String[]{NAME};
}

@Override
public String[] extensions() {
return new String[]{"groovy"};
return new String[]{NAME};
}

@Override
@ -157,6 +153,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
/**
* Return a script object with the given vars from the compiled script object
*/
@SuppressWarnings("unchecked")
private Script createScript(Object compiledScript, Map<String, Object> vars) throws InstantiationException, IllegalAccessException {
Class scriptClass = (Class) compiledScript;
Script scriptObject = (Script) scriptClass.newInstance();
@ -225,12 +222,12 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
private final SearchLookup lookup;
private final Map<String, Object> variables;
private final ESLogger logger;
private Scorer scorer;

public GroovyScript(Script script, ESLogger logger) {
this(script, null, logger);
}

@SuppressWarnings("unchecked")
public GroovyScript(Script script, @Nullable SearchLookup lookup, ESLogger logger) {
this.script = script;
this.lookup = lookup;
@ -240,7 +237,6 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri

@Override
public void setScorer(Scorer scorer) {
this.scorer = scorer;
this.variables.put("_score", new ScoreAccessor(scorer));
}
@ -47,6 +47,8 @@ import java.util.Map;
*/
public class MustacheScriptEngineService extends AbstractComponent implements ScriptEngineService {

public static final String NAME = "mustache";

/** Thread local UTF8StreamWriter to store template execution results in, thread local to save object creation.*/
private static ThreadLocal<SoftReference<UTF8StreamWriter>> utf8StreamWriter = new ThreadLocal<>();

@ -116,12 +118,12 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc

@Override
public String[] types() {
return new String[] {"mustache"};
return new String[] {NAME};
}

@Override
public String[] extensions() {
return new String[] {"mustache"};
return new String[] {NAME};
}

@Override
@ -172,7 +174,7 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc
public MustacheExecutableScript(Mustache mustache,
Map<String, Object> vars) {
this.mustache = mustache;
this.vars = vars == null ? Collections.EMPTY_MAP : vars;
this.vars = vars == null ? Collections.<String, Object>emptyMap() : vars;
}

@Override
@ -184,7 +186,7 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc
public Object run() {
BytesStreamOutput result = new BytesStreamOutput();
UTF8StreamWriter writer = utf8StreamWriter().setOutput(result);
((Mustache) mustache).execute(writer, vars);
mustache.execute(writer, vars);
try {
writer.flush();
} catch (IOException e) {
Some files were not shown because too many files have changed in this diff.