Merge branch 'master' into feature/aggs_2_0
commit 63f3281f12
@@ -96,7 +96,7 @@ if [ "x$ES_INCLUDE" = "x" ]; then
                /usr/local/share/elasticsearch/elasticsearch.in.sh \
                /opt/elasticsearch/elasticsearch.in.sh \
                ~/.elasticsearch.in.sh \
-               $ES_HOME/bin/elasticsearch.in.sh \
+               "$ES_HOME/bin/elasticsearch.in.sh" \
                "`dirname "$0"`"/elasticsearch.in.sh; do
    if [ -r "$include" ]; then
        . "$include"
@@ -151,13 +151,13 @@ launch_service()
    # The es-foreground option will tell Elasticsearch not to close stdout/stderr, but it's up to us not to daemonize.
    if [ "x$daemonized" = "x" ]; then
        es_parms="$es_parms -Des.foreground=yes"
-        exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
+        eval exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms "\"-Des.path.home=$ES_HOME\"" -cp "\"$ES_CLASSPATH\"" $props \
                org.elasticsearch.bootstrap.Elasticsearch
        # exec without running it in the background, makes it replace this shell, we'll never get here...
        # no need to return something
    else
        # Startup Elasticsearch, background it, and write the pid.
-        exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
+        eval exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms "\"-Des.path.home=$ES_HOME\"" -cp "\"$ES_CLASSPATH\"" $props \
                org.elasticsearch.bootstrap.Elasticsearch <&- &
        return $?
    fi
@@ -207,7 +207,7 @@ eval set -- "$args"
while true; do
    case $1 in
        -v)
-            "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $props \
+            eval "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms "\"-Des.path.home=$ES_HOME\"" -cp "\"$ES_CLASSPATH\"" $props \
                    org.elasticsearch.Version
            exit 0
            ;;
@@ -59,12 +59,19 @@ set JAVA_OPTS=%JAVA_OPTS% -XX:+UseCMSInitiatingOccupancyOnly
REM When running under Java 7
REM JAVA_OPTS=%JAVA_OPTS% -XX:+UseCondCardMark

-if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDetails
-if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCTimeStamps
-if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintClassHistogram
-if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintTenuringDistribution
-if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCApplicationStoppedTime
-if NOT "%ES_USE_GC_LOGGING%" == "" set JAVA_OPTS=%JAVA_OPTS% -Xloggc:%ES_HOME%/logs/gc.log
+if "%ES_GC_LOG_FILE%" == "" goto nogclog
+
+:gclog
+set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCDetails
+set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCTimeStamps
+set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintClassHistogram
+set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintTenuringDistribution
+set JAVA_OPTS=%JAVA_OPTS% -XX:+PrintGCApplicationStoppedTime
+set JAVA_OPTS=%JAVA_OPTS% -Xloggc:%ES_GC_LOG_FILE%
+for %%F in ("%ES_GC_LOG_FILE%") do set ES_GC_LOG_FILE_DIRECTORY=%%~dpF
+if NOT EXIST "%ES_GC_LOG_FILE_DIRECTORY%\." mkdir "%ES_GC_LOG_FILE_DIRECTORY%"
+
+:nogclog

REM Causes the JVM to dump its heap on OutOfMemory.
set JAVA_OPTS=%JAVA_OPTS% -XX:+HeapDumpOnOutOfMemoryError
@@ -1,6 +1,6 @@
#!/bin/sh

-ES_CLASSPATH=$ES_CLASSPATH:$ES_HOME/lib/${project.build.finalName}.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/*
+ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/lib/${project.build.finalName}.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/*"

if [ "x$ES_MIN_MEM" = "x" ]; then
    ES_MIN_MEM=256m
@@ -45,13 +45,16 @@ JAVA_OPTS="$JAVA_OPTS -XX:CMSInitiatingOccupancyFraction=75"
JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSInitiatingOccupancyOnly"

# GC logging options
-if [ "x$ES_USE_GC_LOGGING" != "x" ]; then
+if [ -n "$ES_GC_LOG_FILE" ]; then
  JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails"
  JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps"
  JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram"
  JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution"
  JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime"
-  JAVA_OPTS="$JAVA_OPTS -Xloggc:/var/log/elasticsearch/gc.log"
+  JAVA_OPTS="$JAVA_OPTS \"-Xloggc:$ES_GC_LOG_FILE\""
+
+  # Ensure that the directory for the log file exists: the JVM will not create it.
+  mkdir -p "`dirname \"$ES_GC_LOG_FILE\"`"
fi

# Causes the JVM to dump its heap on OutOfMemory.
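With this change GC logging is keyed off `ES_GC_LOG_FILE` rather than `ES_USE_GC_LOGGING`. A minimal usage sketch (paths illustrative), assuming a tarball install started from the shell:

---------------
# enable JVM GC logging by exporting the target file before start-up
export ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
./bin/elasticsearch
---------------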
@@ -61,7 +61,9 @@ Once it's done it will print all the remaining steps.
"""
env = os.environ

-PLUGINS = [('bigdesk', 'lukas-vlcek/bigdesk'),
+PLUGINS = [('license', 'elasticsearch/license/latest'),
+           ('marvel', 'elasticsearch/marvel/latest'),
+           ('bigdesk', 'lukas-vlcek/bigdesk'),
           ('paramedic', 'karmi/elasticsearch-paramedic'),
           ('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
           ('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
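For context, these identifiers are the arguments the release script later feeds to the plugin installer; a hedged sketch of installing one of them by hand with the 1.x-era plugin tool:

---------------
bin/plugin --install elasticsearch/marvel/latest
---------------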
@@ -186,3 +186,49 @@ curl -XGET 'localhost:9200/index/t1,t2/_search'
}
---------------

==== Removed short name field access

Field names in queries, aggregations, etc. must now use the complete name. Use of the short name
caused ambiguities in field lookups when the same name existed within multiple object mappings.

The following example illustrates the difference between 1.x and 2.0.

Given these mappings:

---------------
curl -XPUT 'localhost:9200/index'
{
  "mappings": {
    "type": {
      "properties": {
        "name": {
          "type": "object",
          "properties": {
            "first": {"type": "string"},
            "last": {"type": "string"}
          }
        }
      }
    }
  }
}
---------------

The following query was possible in 1.x:

---------------
curl -XGET 'localhost:9200/index/type/_search'
{
  "query": {
    "match": { "first": "foo" }
  }
}
---------------

In 2.0, the same query should now be:

---------------
curl -XGET 'localhost:9200/index/type/_search'
{
  "query": {
    "match": { "name.first": "foo" }
  }
}
---------------
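The same full-name requirement applies to any other field reference, for example in aggregations; a brief illustrative sketch (the aggregation name is arbitrary):

---------------
curl -XGET 'localhost:9200/index/type/_search'
{
  "aggs": {
    "last_names": {
      "terms": { "field": "name.last" }
    }
  }
}
---------------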
@@ -172,7 +172,7 @@ Ranges with one side unbounded can use the following syntax:
To combine an upper and lower bound with the simplified syntax, you
would need to join two clauses with an `AND` operator:

-    age:(>=10 AND < 20)
+    age:(>=10 AND <20)
    age:(+>=10 +<20)

===================================================================
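For context, this simplified range syntax goes inside the `query` string of a `query_string` query; a minimal sketch (index name illustrative):

---------------
curl -XGET 'localhost:9200/index/_search'
{
  "query": {
    "query_string": {
      "query": "age:(>=10 AND <20)"
    }
  }
}
---------------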
@@ -26,6 +26,7 @@ Each package features a configuration file, which allows you to set the following
`CONF_FILE`:: Path to configuration file, defaults to `/etc/elasticsearch/elasticsearch.yml`
`ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"`
`RESTART_ON_UPGRADE`:: Configure restart on package upgrade, defaults to `false`. This means you will have to restart your elasticsearch instance after installing a package manually. The reason for this is to ensure, that upgrades in a cluster do not result in a continuous shard reallocation resulting in high network traffic and reducing the response times of your cluster.
+`ES_GC_LOG_FILE`:: The absolute log file path for creating a garbage collection logfile, which is done by the JVM. Note that this logfile can grow pretty quick and thus is disabled by default.

[float]
==== Debian/Ubuntu
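For illustration only, these variables live in the package's environment file (commonly `/etc/default/elasticsearch` on Debian/Ubuntu or `/etc/sysconfig/elasticsearch` on RPM systems); a sketch:

---------------
# /etc/default/elasticsearch (illustrative path)
ES_JAVA_OPTS="-Des.node.name=search-01"
RESTART_ON_UPGRADE=true
ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
---------------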
pom.xml
@@ -219,7 +219,7 @@
      <artifactId>joda-time</artifactId>
      <!-- joda 2.0 moved to using volatile fields for datetime -->
      <!-- When updating to a new version, make sure to update our copy of BaseDateTime -->
-      <version>2.3</version>
+      <version>2.7</version>
      <scope>compile</scope>
    </dependency>
    <dependency>
@@ -948,7 +948,7 @@
        </data>
        <data>
            <src>${project.build.directory}/lib</src>
-            <includes>lucene*, *log4j*, jna*, spatial4j*, jts*, groovy*</includes>
+            <includes>lucene*, *log4j*, jna*, spatial4j*, jts*, groovy*, antlr-runtime*, asm*</includes>
            <type>directory</type>
            <mapper>
                <type>perm</type>
@@ -1154,6 +1154,8 @@
            <include>spatial4j*</include>
            <include>jts*</include>
            <include>groovy*</include>
+            <include>antlr-runtime*</include>
+            <include>asm*</include>
        </includes>
    </source>
    <source>
@@ -7,12 +7,16 @@
    "paths": ["/_template/{name}"],
    "parts": {
      "name": {
-        "type" : "string",
-        "required" : true,
-        "description" : "The name of the template"
+        "type": "string",
+        "required": true,
+        "description": "The name of the template"
      }
    },
    "params": {
+      "master_timeout": {
+        "type": "time",
+        "description": "Explicit operation timeout for connection to master node"
+      },
      "local": {
        "type": "boolean",
        "description": "Return local information, do not retrieve the state from master node (default: false)"
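Assuming this spec backs the template-exists API, the new `master_timeout` parameter would be supplied on the query string; a hedged sketch with a hypothetical template name:

---------------
curl -I 'localhost:9200/_template/my_template?master_timeout=1m'
---------------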
@@ -4,12 +4,15 @@
  "methods": ["GET"],
  "url": {
    "path": "/_template/{name}",
-    "paths": ["/_template", "/_template/{name}"],
+    "paths": [
+      "/_template",
+      "/_template/{name}"
+    ],
    "parts": {
      "name": {
-        "type" : "string",
-        "required" : false,
-        "description" : "The name of the template"
+        "type": "string",
+        "required": false,
+        "description": "The name of the template"
      }
    },
    "params": {
@@ -17,6 +20,10 @@
        "type": "boolean",
        "description": "Return settings in flat format (default: false)"
      },
+      "master_timeout": {
+        "type": "time",
+        "description": "Explicit operation timeout for connection to master node"
+      },
      "local": {
        "type": "boolean",
        "description": "Return local information, do not retrieve the state from master node (default: false)"
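Similarly for the get-template API, a hedged usage sketch combining the new `master_timeout` with the existing `flat_settings` and `local` parameters (template name hypothetical):

---------------
curl -XGET 'localhost:9200/_template/my_template?flat_settings=true&master_timeout=30s&local=false'
---------------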
@@ -24,6 +24,14 @@
---
"Test cat segments output":

  - do:
      cat.segments:
        v: false

  - match:
      $body: |
               /^$/

  - do:
      indices.create:
        index: index1
@@ -39,14 +47,14 @@
        refresh: true
  - do:
      cluster.health:
-        wait_for_status: yellow
+        wait_for_status: green
  - do:
      cat.segments:
        v: false
  - match:
      $body: |
               /^(index1 \s+ \d \s+ (p|r) \s+ \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} \s+ _\d (\s\d){3} \s+
-               (\d+|\d+[.]\d+)(kb|b) \s+ \d+ (\s+ (false|true)){2} \s+ \d+\.\d+\.\d+ \s+ (false|true) \s? \n?)$/
+               (\d+|\d+[.]\d+)(kb|b) \s+ \d+ (\s+ (false|true)){2} \s+ \d+\.\d+(\.\d+)? \s+ (false|true) \s? \n?)$/

  - do:
      indices.create:
@@ -57,7 +65,7 @@
            number_of_replicas: "0"
  - do:
      cluster.health:
-        wait_for_status: yellow
+        wait_for_status: green
        wait_for_relocating_shards: 0

  - do:
@@ -68,7 +76,7 @@
        refresh: true
  - do:
      cluster.health:
-        wait_for_status: yellow
+        wait_for_status: green


  - do:
@@ -85,3 +93,28 @@
  - match:
      $body: |
               /^(index2 .+ \n?)$/

---
"Test cat segments on closed index behaviour":

  - do:
      indices.create:
        index: index1
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.close:
        index: index1

  - do:
      catch: forbidden
      cat.segments:
        index: index1
        v: false
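For reference, these REST tests exercise the cat segments API; a sketch of roughly equivalent curl calls (index name taken from the test, and the final request is expected to be rejected, mirroring the `catch: forbidden` step above):

---------------
curl -XGET 'localhost:9200/_cat/segments/index1?v'
curl -XPOST 'localhost:9200/index1/_close'
curl -XGET 'localhost:9200/_cat/segments/index1'   # closed index: request is rejected
---------------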
@@ -24,6 +24,7 @@ setup:
  - do:
      indices.exists_template:
        name: test
+        master_timeout: 1m

  - is_true: ''

@@ -46,11 +46,12 @@ setup:
  - is_true: test

---
-"Get template with flat settings":
+"Get template with flat settings and master timeout":

  - do:
      indices.get_template:
        name: test
        flat_settings: true
+        master_timeout: 1m

  - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}}
@@ -1,5 +1,75 @@
---
-"segments test":
+"no segments test":
  - do:
      indices.segments:
        allow_no_indices: true

  - match: { _shards.total: 0}
  - match: { indices: {}}

  - do:
      catch: missing
      indices.segments:
        allow_no_indices: false

---
"basic segments test":

  - do:
      indices.create:
        index: index1
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"
  - do:
      index:
        index: index1
        type: type
        body: { foo: bar }
        refresh: true

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.segments:
        index: index1

  - match: { _shards.total: 1}
  - match: { indices.index1.shards.0.0.routing.primary: true}
  - match: { indices.index1.shards.0.0.segments._0.num_docs: 1}

---
"closed segments test":

  - do:
      indices.create:
        index: index1
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"
  - do:
      index:
        index: index1
        type: type
        body: { foo: bar }
        refresh: true

  - do:
      indices.close:
        index: index1

  - do:
      catch: forbidden
      indices.segments:
        index: index1

  - do:
      indices.segments:
        index: index1
        ignore_unavailable: true

  - match: { _shards.total: 0}
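The `allow_no_indices` behaviour exercised above maps onto the segments API roughly as follows (a sketch, run against a cluster with no matching indices):

---------------
curl -XGET 'localhost:9200/_segments?allow_no_indices=true'    # 200, "_shards": {"total": 0}, "indices": {}
curl -XGET 'localhost:9200/_segments?allow_no_indices=false'   # index-missing error, as in the `catch: missing` step
---------------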
@@ -1,6 +1,9 @@
---
"Indexed script":

+  - skip:
+      features: groovy_scripting
+
  - do:
      put_script:
        id: "1"

@@ -1,6 +1,10 @@
---
"External version":

+  - skip:
+      features: groovy_scripting
+
+
  - do:
      put_script:
        lang: groovy

@@ -1,6 +1,9 @@
---
"Script":

+  - skip:
+      features: groovy_scripting
+
  - do:
      index:
        index: test_1

@@ -1,6 +1,9 @@
---
"Script upsert":

+  - skip:
+      features: groovy_scripting
+
  - do:
      update:
        index: test_1

@@ -20,6 +20,9 @@
---
"Missing document (script)":

+  - skip:
+      features: groovy_scripting
+
  - do:
      catch: missing
@@ -42,3 +42,6 @@

# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#RESTART_ON_UPGRADE=true
+
+# Path to the GC log file
+#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log

@@ -94,6 +94,9 @@ CONF_FILE=$CONF_DIR/elasticsearch.yml
# Maximum number of VMA (Virtual Memory Areas) a process can own
MAX_MAP_COUNT=262144

+# Path to the GC log file
+#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
+
# End of variables that can be overwritten in $DEFAULT

# overwrite settings from default file
@@ -110,6 +113,7 @@ export ES_HEAP_SIZE
export ES_HEAP_NEWSIZE
export ES_DIRECT_SIZE
export ES_JAVA_OPTS
+export ES_GC_LOG_FILE

# Check DAEMON exists
test -x $DAEMON || exit 0
@@ -37,7 +37,6 @@ public class IndicesSegmentsRequest extends BroadcastOperationRequest<IndicesSeg

    public IndicesSegmentsRequest(String... indices) {
        super(indices);
-        indicesOptions(IndicesOptions.fromOptions(false, false, true, false));
    }

    /**
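With the hard-coded `IndicesOptions` removed, the request falls back to the defaults of its base class and honours the standard flags such as `ignore_unavailable`; a hedged sketch of the behaviour the REST tests above expect:

---------------
curl -XGET 'localhost:9200/index1/_segments'                           # closed index: request fails
curl -XGET 'localhost:9200/index1/_segments?ignore_unavailable=true'   # closed index skipped, "_shards": {"total": 0}
---------------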
@ -33,11 +33,9 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
|||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.env.ShardLock;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
|
@ -87,6 +85,10 @@ public class NodeIndexDeletedAction extends AbstractComponent {
|
|||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
innerNodeIndexDeleted(index, nodeId);
|
||||
if (nodes.localNode().isDataNode() == false) {
|
||||
logger.trace("[{}] not acking store deletion (not a data node)");
|
||||
return;
|
||||
}
|
||||
lockIndexAndAck(index, nodes, nodeId, clusterState);
|
||||
|
||||
}
|
||||
|
@ -94,6 +96,10 @@ public class NodeIndexDeletedAction extends AbstractComponent {
|
|||
} else {
|
||||
transportService.sendRequest(clusterState.nodes().masterNode(),
|
||||
INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
|
||||
if (nodes.localNode().isDataNode() == false) {
|
||||
logger.trace("[{}] not acking store deletion (not a data node)");
|
||||
return;
|
||||
}
|
||||
threadPool.generic().execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
|
|
|
@ -32,8 +32,8 @@ import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodeService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.OperationRouting;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
|
@ -235,7 +235,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
}
|
||||
// call the post added notification on the same event thread
|
||||
try {
|
||||
updateTasksExecutor.execute(new PrioritizedRunnable(Priority.HIGH) {
|
||||
updateTasksExecutor.execute(new TimedPrioritizedRunnable(Priority.HIGH, "_add_listener_") {
|
||||
@Override
|
||||
public void run() {
|
||||
NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
|
||||
|
@ -272,7 +272,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
threadPool.generic().execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
timeoutUpdateTask.onFailure(task.source, new ProcessClusterEventTimeoutException(timeoutUpdateTask.timeout(), task.source));
|
||||
timeoutUpdateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(timeoutUpdateTask.timeout(), task.source()));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@ -291,19 +291,23 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
@Override
|
||||
public List<PendingClusterTask> pendingTasks() {
|
||||
long now = System.currentTimeMillis();
|
||||
PrioritizedEsThreadPoolExecutor.Pending[] pendings = updateTasksExecutor.getPending();
|
||||
List<PendingClusterTask> pendingClusterTasks = new ArrayList<>(pendings.length);
|
||||
for (PrioritizedEsThreadPoolExecutor.Pending pending : pendings) {
|
||||
final String source;
|
||||
final long timeInQueue;
|
||||
if (pending.task instanceof UpdateTask) {
|
||||
UpdateTask updateTask = (UpdateTask) pending.task;
|
||||
source = updateTask.source;
|
||||
timeInQueue = now - updateTask.addedAt;
|
||||
// we have to capture the task as it will be nulled after execution and we don't want to change while we check things here.
|
||||
final Object task = pending.task;
|
||||
if (task == null) {
|
||||
continue;
|
||||
} else if (task instanceof TimedPrioritizedRunnable) {
|
||||
TimedPrioritizedRunnable runnable = (TimedPrioritizedRunnable) task;
|
||||
source = runnable.source();
|
||||
timeInQueue = runnable.timeSinceCreatedInMillis();
|
||||
} else {
|
||||
assert false : "expected TimedPrioritizedRunnable got " + task.getClass();
|
||||
source = "unknown";
|
||||
timeInQueue = -1;
|
||||
timeInQueue = 0;
|
||||
}
|
||||
|
||||
pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue, pending.executing));
|
||||
|
@ -311,15 +315,34 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
return pendingClusterTasks;
|
||||
}
|
||||
|
||||
class UpdateTask extends PrioritizedRunnable {
|
||||
static abstract class TimedPrioritizedRunnable extends PrioritizedRunnable {
|
||||
private final long creationTime;
|
||||
protected final String source;
|
||||
|
||||
public final String source;
|
||||
public final ClusterStateUpdateTask updateTask;
|
||||
public final long addedAt = System.currentTimeMillis();
|
||||
|
||||
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
|
||||
protected TimedPrioritizedRunnable(Priority priority, String source) {
|
||||
super(priority);
|
||||
this.source = source;
|
||||
this.creationTime = System.currentTimeMillis();
|
||||
}
|
||||
|
||||
public long timeSinceCreatedInMillis() {
|
||||
// max with 0 to make sure we always return a non negative number
|
||||
// even if time shifts.
|
||||
return Math.max(0, System.currentTimeMillis() - creationTime);
|
||||
}
|
||||
|
||||
public String source() {
|
||||
return source;
|
||||
}
|
||||
}
|
||||
|
||||
class UpdateTask extends TimedPrioritizedRunnable {
|
||||
|
||||
public final ClusterStateUpdateTask updateTask;
|
||||
|
||||
|
||||
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
|
||||
super(priority, source);
|
||||
this.updateTask = updateTask;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.cluster.service;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -43,6 +42,8 @@ public class PendingClusterTask implements Streamable {
|
|||
}
|
||||
|
||||
public PendingClusterTask(long insertOrder, Priority priority, Text source, long timeInQueue, boolean executing) {
|
||||
assert timeInQueue >= 0 : "got a negative timeInQueue [" + timeInQueue + "]";
|
||||
assert insertOrder >= 0 : "got a negative insertOrder [" + insertOrder + "]";
|
||||
this.insertOrder = insertOrder;
|
||||
this.priority = priority;
|
||||
this.source = source;
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.common.rounding;
|
||||
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -62,6 +63,8 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
|
||||
public Builder(TimeValue interval) {
|
||||
this.unit = null;
|
||||
if (interval.millis() < 1)
|
||||
throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported");
|
||||
this.interval = interval.millis();
|
||||
}
|
||||
|
||||
|
@ -303,6 +306,8 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
}
|
||||
|
||||
UTCIntervalTimeZoneRounding(long interval) {
|
||||
if (interval < 1)
|
||||
throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported");
|
||||
this.interval = interval;
|
||||
}
|
||||
|
||||
|
@ -350,6 +355,8 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
}
|
||||
|
||||
TimeIntervalTimeZoneRounding(long interval, DateTimeZone preTz, DateTimeZone postTz) {
|
||||
if (interval < 1)
|
||||
throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported");
|
||||
this.interval = interval;
|
||||
this.preTz = preTz;
|
||||
this.postTz = postTz;
|
||||
|
@ -408,6 +415,8 @@ public abstract class TimeZoneRounding extends Rounding {
|
|||
}
|
||||
|
||||
DayIntervalTimeZoneRounding(long interval, DateTimeZone preTz, DateTimeZone postTz) {
|
||||
if (interval < 1)
|
||||
throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported");
|
||||
this.interval = interval;
|
||||
this.preTz = preTz;
|
||||
this.postTz = postTz;
|
||||
|
|
|
@ -26,30 +26,36 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.search.SearcherManager;
|
||||
import org.apache.lucene.search.join.BitDocIdSetFilter;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.Accountables;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchIllegalStateException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Preconditions;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
|
||||
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.ParseContext.Document;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.locks.Condition;
|
||||
import java.util.concurrent.locks.Lock;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
@ -59,8 +65,15 @@ import java.util.concurrent.locks.ReentrantLock;
|
|||
*/
|
||||
public abstract class Engine implements Closeable {
|
||||
|
||||
protected final ShardId shardId;
|
||||
protected final ESLogger logger;
|
||||
protected final EngineConfig engineConfig;
|
||||
protected final Store store;
|
||||
protected final AtomicBoolean isClosed = new AtomicBoolean(false);
|
||||
protected final FailedEngineListener failedEngineListener;
|
||||
protected final SnapshotDeletionPolicy deletionPolicy;
|
||||
|
||||
protected volatile Throwable failedEngine = null;
|
||||
|
||||
protected Engine(EngineConfig engineConfig) {
|
||||
Preconditions.checkNotNull(engineConfig.getStore(), "Store must be provided to the engine");
|
||||
|
@ -68,7 +81,11 @@ public abstract class Engine implements Closeable {
|
|||
Preconditions.checkNotNull(engineConfig.getTranslog(), "Translog must be provided to the engine");
|
||||
|
||||
this.engineConfig = engineConfig;
|
||||
this.shardId = engineConfig.getShardId();
|
||||
this.store = engineConfig.getStore();
|
||||
this.logger = Loggers.getLogger(getClass(), engineConfig.getIndexSettings(), engineConfig.getShardId());
|
||||
this.failedEngineListener = engineConfig.getFailedEngineListener();
|
||||
this.deletionPolicy = engineConfig.getDeletionPolicy();
|
||||
}
|
||||
|
||||
/** Returns 0 in the case where accountable is null, otherwise returns {@code ramBytesUsed()} */
|
||||
|
@ -107,7 +124,7 @@ public abstract class Engine implements Closeable {
|
|||
}
|
||||
|
||||
protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) {
|
||||
return new EngineSearcher(source, searcher, manager, engineConfig.getStore(), logger);
|
||||
return new EngineSearcher(source, searcher, manager, store, logger);
|
||||
}
|
||||
|
||||
public final EngineConfig config() {
|
||||
|
@ -181,6 +198,35 @@ public abstract class Engine implements Closeable {
|
|||
|
||||
public abstract void delete(DeleteByQuery delete) throws EngineException;
|
||||
|
||||
final protected GetResult getFromSearcher(Get get) throws EngineException {
|
||||
final Searcher searcher = acquireSearcher("get");
|
||||
final Versions.DocIdAndVersion docIdAndVersion;
|
||||
try {
|
||||
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
|
||||
} catch (Throwable e) {
|
||||
Releasables.closeWhileHandlingException(searcher);
|
||||
//TODO: A better exception goes here
|
||||
throw new EngineException(shardId, "Couldn't resolve version", e);
|
||||
}
|
||||
|
||||
if (docIdAndVersion != null) {
|
||||
if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
|
||||
Releasables.close(searcher);
|
||||
Uid uid = Uid.createUid(get.uid().text());
|
||||
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
|
||||
}
|
||||
}
|
||||
|
||||
if (docIdAndVersion != null) {
|
||||
// don't release the searcher on this path, it is the
|
||||
// responsibility of the caller to call GetResult.release
|
||||
return new GetResult(searcher, docIdAndVersion);
|
||||
} else {
|
||||
Releasables.close(searcher);
|
||||
return GetResult.NOT_EXISTS;
|
||||
}
|
||||
}
|
||||
|
||||
public abstract GetResult get(Get get) throws EngineException;
|
||||
|
||||
/**
|
||||
|
@ -190,22 +236,144 @@ public abstract class Engine implements Closeable {
|
|||
*
|
||||
* @see Searcher#close()
|
||||
*/
|
||||
public abstract Searcher acquireSearcher(String source) throws EngineException;
|
||||
public final Searcher acquireSearcher(String source) throws EngineException {
|
||||
boolean success = false;
|
||||
/* Acquire order here is store -> manager since we need
|
||||
* to make sure that the store is not closed before
|
||||
* the searcher is acquired. */
|
||||
store.incRef();
|
||||
try {
|
||||
final SearcherManager manager = getSearcherManager(); // can never be null
|
||||
/* This might throw NPE but that's fine we will run ensureOpen()
|
||||
* in the catch block and throw the right exception */
|
||||
final IndexSearcher searcher = manager.acquire();
|
||||
try {
|
||||
final Searcher retVal = newSearcher(source, searcher, manager);
|
||||
success = true;
|
||||
return retVal;
|
||||
} finally {
|
||||
if (!success) {
|
||||
manager.release(searcher);
|
||||
}
|
||||
}
|
||||
} catch (EngineClosedException ex) {
|
||||
throw ex;
|
||||
} catch (Throwable ex) {
|
||||
ensureOpen(); // throw EngineCloseException here if we are already closed
|
||||
logger.error("failed to acquire searcher, source {}", ex, source);
|
||||
throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
|
||||
} finally {
|
||||
if (!success) { // release the ref in the case of an error...
|
||||
store.decRef();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void ensureOpen() {
|
||||
if (isClosed.get()) {
|
||||
throw new EngineClosedException(shardId, failedEngine);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Global stats on segments.
|
||||
*/
|
||||
public abstract SegmentsStats segmentsStats();
|
||||
|
||||
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
|
||||
ensureOpen();
|
||||
Map<String, Segment> segments = new HashMap<>();
|
||||
|
||||
// first, go over and compute the search ones...
|
||||
Searcher searcher = acquireSearcher("segments");
|
||||
try {
|
||||
for (LeafReaderContext reader : searcher.reader().leaves()) {
|
||||
SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
|
||||
assert !segments.containsKey(info.info.name);
|
||||
Segment segment = new Segment(info.info.name);
|
||||
segment.search = true;
|
||||
segment.docCount = reader.reader().numDocs();
|
||||
segment.delDocCount = reader.reader().numDeletedDocs();
|
||||
segment.version = info.info.getVersion();
|
||||
segment.compound = info.info.getUseCompoundFile();
|
||||
try {
|
||||
segment.sizeInBytes = info.sizeInBytes();
|
||||
} catch (IOException e) {
|
||||
logger.trace("failed to get size for [{}]", e, info.info.name);
|
||||
}
|
||||
final SegmentReader segmentReader = segmentReader(reader.reader());
|
||||
segment.memoryInBytes = segmentReader.ramBytesUsed();
|
||||
if (verbose) {
|
||||
segment.ramTree = Accountables.namedAccountable("root", segmentReader);
|
||||
}
|
||||
// TODO: add more fine grained mem stats values to per segment info here
|
||||
segments.put(info.info.name, segment);
|
||||
}
|
||||
} finally {
|
||||
searcher.close();
|
||||
}
|
||||
|
||||
// now, correlate or add the committed ones...
|
||||
if (lastCommittedSegmentInfos != null) {
|
||||
SegmentInfos infos = lastCommittedSegmentInfos;
|
||||
for (SegmentCommitInfo info : infos) {
|
||||
Segment segment = segments.get(info.info.name);
|
||||
if (segment == null) {
|
||||
segment = new Segment(info.info.name);
|
||||
segment.search = false;
|
||||
segment.committed = true;
|
||||
segment.docCount = info.info.getDocCount();
|
||||
segment.delDocCount = info.getDelCount();
|
||||
segment.version = info.info.getVersion();
|
||||
segment.compound = info.info.getUseCompoundFile();
|
||||
try {
|
||||
segment.sizeInBytes = info.sizeInBytes();
|
||||
} catch (IOException e) {
|
||||
logger.trace("failed to get size for [{}]", e, info.info.name);
|
||||
}
|
||||
segments.put(info.info.name, segment);
|
||||
} else {
|
||||
segment.committed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
|
||||
Arrays.sort(segmentsArr, new Comparator<Segment>() {
|
||||
@Override
|
||||
public int compare(Segment o1, Segment o2) {
|
||||
return (int) (o1.getGeneration() - o2.getGeneration());
|
||||
}
|
||||
});
|
||||
|
||||
return segmentsArr;
|
||||
}
|
||||
|
||||
/**
|
||||
* The list of segments in the engine.
|
||||
*/
|
||||
public abstract List<Segment> segments(boolean verbose);
|
||||
|
||||
/**
|
||||
* Returns <tt>true</tt> if a refresh is really needed.
|
||||
public final boolean refreshNeeded() {
|
||||
if (store.tryIncRef()) {
|
||||
/*
|
||||
we need to inc the store here since searcherManager.isSearcherCurrent()
|
||||
acquires a searcher internally and that might keep a file open on the
|
||||
store. this violates the assumption that all files are closed when
|
||||
the store is closed so we need to make sure we increment it here
|
||||
*/
|
||||
public abstract boolean refreshNeeded();
|
||||
try {
|
||||
return !getSearcherManager().isSearcherCurrent();
|
||||
} catch (IOException e) {
|
||||
logger.error("failed to access searcher manager", e);
|
||||
failEngine("failed to access searcher manager", e);
|
||||
throw new EngineException(shardId, "failed to access searcher manager", e);
|
||||
} finally {
|
||||
store.decRef();
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Refreshes the engine for new search operations to reflect the latest
|
||||
|
@ -250,6 +418,34 @@ public abstract class Engine implements Closeable {
|
|||
/** fail engine due to some error. the engine will also be closed. */
|
||||
public abstract void failEngine(String reason, Throwable failure);
|
||||
|
||||
/** Check whether the engine should be failed */
|
||||
protected boolean maybeFailEngine(String source, Throwable t) {
|
||||
if (Lucene.isCorruptionException(t)) {
|
||||
if (engineConfig.isFailEngineOnCorruption()) {
|
||||
failEngine("corrupt file detected source: [" + source + "]", t);
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("corrupt file detected source: [{}] but [{}] is set to [{}]", t, source,
|
||||
EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, engineConfig.isFailEngineOnCorruption());
|
||||
}
|
||||
} else if (ExceptionsHelper.isOOM(t)) {
|
||||
failEngine("out of memory", t);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Wrap a Throwable in an {@code EngineClosedException} if the engine is already closed */
|
||||
protected Throwable wrapIfClosed(Throwable t) {
|
||||
if (isClosed.get()) {
|
||||
if (t != failedEngine && failedEngine != null) {
|
||||
t.addSuppressed(failedEngine);
|
||||
}
|
||||
return new EngineClosedException(shardId, t);
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
public static interface FailedEngineListener {
|
||||
void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t);
|
||||
}
|
||||
|
@ -765,4 +961,6 @@ public abstract class Engine implements Closeable {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract SearcherManager getSearcherManager();
|
||||
}
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
|
|||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.Accountables;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
@ -34,18 +33,13 @@ import org.elasticsearch.cluster.routing.DjbHashFunction;
|
|||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.lucene.LoggerInfoStream;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.math.MathUtils;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
||||
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
|
||||
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
|
||||
import org.elasticsearch.index.indexing.ShardIndexingService;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
|
@ -54,8 +48,6 @@ import org.elasticsearch.index.merge.policy.ElasticsearchMergePolicy;
|
|||
import org.elasticsearch.index.merge.policy.MergePolicyProvider;
|
||||
import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
|
||||
import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.indices.IndicesWarmer;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -75,7 +67,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
|
|||
*/
|
||||
public class InternalEngine extends Engine {
|
||||
|
||||
protected final ShardId shardId;
|
||||
private final FailEngineOnMergeFailure mergeSchedulerFailureListener;
|
||||
private final MergeSchedulerListener mergeSchedulerListener;
|
||||
|
||||
|
@ -85,8 +76,6 @@ public class InternalEngine extends Engine {
|
|||
private final ShardIndexingService indexingService;
|
||||
@Nullable
|
||||
private final IndicesWarmer warmer;
|
||||
private final Store store;
|
||||
private final SnapshotDeletionPolicy deletionPolicy;
|
||||
private final Translog translog;
|
||||
private final MergePolicyProvider mergePolicyProvider;
|
||||
private final MergeSchedulerProvider mergeScheduler;
|
||||
|
@ -100,7 +89,6 @@ public class InternalEngine extends Engine {
|
|||
private final SearcherFactory searcherFactory;
|
||||
private final SearcherManager searcherManager;
|
||||
|
||||
private final AtomicBoolean isClosed = new AtomicBoolean(false);
|
||||
private final AtomicBoolean optimizeMutex = new AtomicBoolean();
|
||||
// we use flushNeeded here, since if there are no changes, then the commit won't write
|
||||
// will not really happen, and then the commitUserData and the new translog will not be reflected
|
||||
|
@ -113,9 +101,7 @@ public class InternalEngine extends Engine {
|
|||
private final LiveVersionMap versionMap;
|
||||
|
||||
private final Object[] dirtyLocks;
|
||||
private volatile Throwable failedEngine = null;
|
||||
private final ReentrantLock failEngineLock = new ReentrantLock();
|
||||
private final FailedEngineListener failedEngineListener;
|
||||
|
||||
private final AtomicLong translogIdGenerator = new AtomicLong();
|
||||
private final AtomicBoolean versionMapRefreshPending = new AtomicBoolean();
|
||||
|
@ -126,8 +112,6 @@ public class InternalEngine extends Engine {
|
|||
|
||||
public InternalEngine(EngineConfig engineConfig) throws EngineException {
|
||||
super(engineConfig);
|
||||
this.store = engineConfig.getStore();
|
||||
this.shardId = engineConfig.getShardId();
|
||||
this.versionMap = new LiveVersionMap();
|
||||
store.incRef();
|
||||
IndexWriter writer = null;
|
||||
|
@ -138,7 +122,6 @@ public class InternalEngine extends Engine {
|
|||
this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
|
||||
this.indexingService = engineConfig.getIndexingService();
|
||||
this.warmer = engineConfig.getWarmer();
|
||||
this.deletionPolicy = engineConfig.getDeletionPolicy();
|
||||
this.translog = engineConfig.getTranslog();
|
||||
this.mergePolicyProvider = engineConfig.getMergePolicyProvider();
|
||||
this.mergeScheduler = engineConfig.getMergeScheduler();
|
||||
|
@ -147,7 +130,6 @@ public class InternalEngine extends Engine {
|
|||
dirtyLocks[i] = new Object();
|
||||
}
|
||||
|
||||
this.failedEngineListener = engineConfig.getFailedEngineListener();
|
||||
throttle = new IndexThrottle();
|
||||
this.searcherFactory = new SearchFactory(engineConfig);
|
||||
try {
|
||||
|
@ -251,31 +233,7 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
// no version, get the version from the index, we know that we refresh on flush
|
||||
final Searcher searcher = acquireSearcher("get");
|
||||
final Versions.DocIdAndVersion docIdAndVersion;
|
||||
try {
|
||||
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
|
||||
} catch (Throwable e) {
|
||||
Releasables.closeWhileHandlingException(searcher);
|
||||
//TODO: A better exception goes here
|
||||
throw new EngineException(shardId, "Couldn't resolve version", e);
|
||||
}
|
||||
|
||||
if (docIdAndVersion != null) {
|
||||
if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
|
||||
Releasables.close(searcher);
|
||||
Uid uid = Uid.createUid(get.uid().text());
|
||||
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
|
||||
}
|
||||
}
|
||||
|
||||
if (docIdAndVersion != null) {
|
||||
// don't release the searcher on this path, it is the responsability of the caller to call GetResult.release
|
||||
return new GetResult(searcher, docIdAndVersion);
|
||||
} else {
|
||||
Releasables.close(searcher);
|
||||
return GetResult.NOT_EXISTS;
|
||||
}
|
||||
return getFromSearcher(get);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -579,63 +537,6 @@ public class InternalEngine extends Engine {
|
|||
refresh("delete_by_query");
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Searcher acquireSearcher(String source) throws EngineException {
|
||||
boolean success = false;
|
||||
/* Acquire order here is store -> manager since we need
|
||||
* to make sure that the store is not closed before
|
||||
* the searcher is acquired. */
|
||||
store.incRef();
|
||||
try {
|
||||
final SearcherManager manager = this.searcherManager; // can never be null
|
||||
assert manager != null : "SearcherManager is null";
|
||||
/* This might throw NPE but that's fine we will run ensureOpen()
|
||||
* in the catch block and throw the right exception */
|
||||
final IndexSearcher searcher = manager.acquire();
|
||||
try {
|
||||
final Searcher retVal = newSearcher(source, searcher, manager);
|
||||
success = true;
|
||||
return retVal;
|
||||
} finally {
|
||||
if (!success) {
|
||||
manager.release(searcher);
|
||||
}
|
||||
}
|
||||
} catch (EngineClosedException ex) {
|
||||
throw ex;
|
||||
} catch (Throwable ex) {
|
||||
ensureOpen(); // throw EngineCloseException here if we are already closed
|
||||
logger.error("failed to acquire searcher, source {}", ex, source);
|
||||
throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex);
|
||||
} finally {
|
||||
if (!success) { // release the ref in the case of an error...
|
||||
store.decRef();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean refreshNeeded() {
|
||||
if (store.tryIncRef()) {
|
||||
/*
|
||||
we need to inc the store here since searcherManager.isSearcherCurrent()
|
||||
acquires a searcher internally and that might keep a file open on the
|
||||
store. this violates the assumption that all files are closed when
|
||||
the store is closed so we need to make sure we increment it here
|
||||
*/
|
||||
try {
|
||||
return !searcherManager.isSearcherCurrent();
|
||||
} catch (IOException e) {
|
||||
logger.error("failed to access searcher manager", e);
|
||||
failEngine("failed to access searcher manager", e);
|
||||
throw new EngineException(shardId, "failed to access searcher manager", e);
|
||||
} finally {
|
||||
store.decRef();
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void refresh(String source) throws EngineException {
|
||||
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
|
||||
|
@ -770,12 +671,6 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
}
|
||||
|
||||
private void ensureOpen() {
|
||||
if (isClosed.get()) {
|
||||
throw new EngineClosedException(shardId, failedEngine);
|
||||
}
|
||||
}
|
||||
|
||||
private void pruneDeletedTombstones() {
|
||||
long timeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
|
||||
|
||||
|
@ -858,7 +753,6 @@ public class InternalEngine extends Engine {
|
|||
waitForMerges(flush, upgrade);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public SnapshotIndexCommit snapshotIndex() throws EngineException {
|
||||
// we have to flush outside of the readlock otherwise we might have a problem upgrading
|
||||
|
@ -931,18 +825,15 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
}
|
||||
|
||||
private boolean maybeFailEngine(String source, Throwable t) {
|
||||
if (Lucene.isCorruptionException(t)) {
|
||||
if (engineConfig.isFailEngineOnCorruption()) {
|
||||
failEngine("corrupt file detected source: [" + source + "]", t);
|
||||
@Override
|
||||
protected boolean maybeFailEngine(String source, Throwable t) {
|
||||
boolean shouldFail = super.maybeFailEngine(source, t);
|
||||
if (shouldFail) {
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("corrupt file detected source: [{}] but [{}] is set to [{}]", t, source, EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, engineConfig.isFailEngineOnCorruption());
|
||||
}
|
||||
} else if (ExceptionsHelper.isOOM(t)) {
|
||||
failEngine("out of memory", t);
|
||||
return true;
|
||||
} else if (t instanceof AlreadyClosedException) {
|
||||
|
||||
// Check for AlreadyClosedException
|
||||
if (t instanceof AlreadyClosedException) {
|
||||
// if we are already closed due to some tragic exception
|
||||
// we need to fail the engine. it might have already been failed before
|
||||
// but we are double-checking it's failed and closed
|
||||
|
@ -959,16 +850,6 @@ public class InternalEngine extends Engine {
|
|||
return false;
|
||||
}
|
||||
|
||||
private Throwable wrapIfClosed(Throwable t) {
|
||||
if (isClosed.get()) {
|
||||
if (t != failedEngine && failedEngine != null) {
|
||||
t.addSuppressed(failedEngine);
|
||||
}
|
||||
return new EngineClosedException(shardId, t);
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SegmentsStats segmentsStats() {
|
||||
ensureOpen();
|
||||
|
@ -993,70 +874,7 @@ public class InternalEngine extends Engine {
|
|||
@Override
|
||||
public List<Segment> segments(boolean verbose) {
|
||||
try (ReleasableLock _ = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
Map<String, Segment> segments = new HashMap<>();
|
||||
|
||||
// first, go over and compute the search ones...
|
||||
Searcher searcher = acquireSearcher("segments");
|
||||
try {
|
||||
for (LeafReaderContext reader : searcher.reader().leaves()) {
|
||||
SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
|
||||
assert !segments.containsKey(info.info.name);
|
||||
Segment segment = new Segment(info.info.name);
|
||||
segment.search = true;
|
||||
segment.docCount = reader.reader().numDocs();
|
||||
segment.delDocCount = reader.reader().numDeletedDocs();
|
||||
segment.version = info.info.getVersion();
|
||||
segment.compound = info.info.getUseCompoundFile();
|
||||
try {
|
||||
segment.sizeInBytes = info.sizeInBytes();
|
||||
} catch (IOException e) {
|
||||
logger.trace("failed to get size for [{}]", e, info.info.name);
|
||||
}
|
||||
final SegmentReader segmentReader = segmentReader(reader.reader());
|
||||
segment.memoryInBytes = segmentReader.ramBytesUsed();
|
||||
if (verbose) {
|
||||
segment.ramTree = Accountables.namedAccountable("root", segmentReader);
|
||||
}
|
||||
// TODO: add more fine grained mem stats values to per segment info here
|
||||
segments.put(info.info.name, segment);
|
||||
}
|
||||
} finally {
|
||||
searcher.close();
|
||||
}
|
||||
|
||||
// now, correlate or add the committed ones...
|
||||
if (lastCommittedSegmentInfos != null) {
|
||||
SegmentInfos infos = lastCommittedSegmentInfos;
|
||||
for (SegmentCommitInfo info : infos) {
|
||||
Segment segment = segments.get(info.info.name);
|
||||
if (segment == null) {
|
||||
segment = new Segment(info.info.name);
|
||||
segment.search = false;
|
||||
segment.committed = true;
|
||||
segment.docCount = info.info.getDocCount();
|
||||
segment.delDocCount = info.getDelCount();
|
||||
segment.version = info.info.getVersion();
|
||||
segment.compound = info.info.getUseCompoundFile();
|
||||
try {
|
||||
segment.sizeInBytes = info.sizeInBytes();
|
||||
} catch (IOException e) {
|
||||
logger.trace("failed to get size for [{}]", e, info.info.name);
|
||||
}
|
||||
segments.put(info.info.name, segment);
|
||||
} else {
|
||||
segment.committed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
|
||||
Arrays.sort(segmentsArr, new Comparator<Segment>() {
|
||||
@Override
|
||||
public int compare(Segment o1, Segment o2) {
|
||||
return (int) (o1.getGeneration() - o2.getGeneration());
|
||||
}
|
||||
});
|
||||
Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);
|
||||
|
||||
// fill in the merges flag
|
||||
Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
|
||||
|
@ -1070,7 +888,6 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Arrays.asList(segmentsArr);
|
||||
}
|
||||
}
|
||||
|
@ -1162,6 +979,11 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected SearcherManager getSearcherManager() {
|
||||
return searcherManager;
|
||||
}
|
||||
|
||||
private Object dirtyLock(BytesRef uid) {
|
||||
int hash = DjbHashFunction.DJB_HASH(uid.bytes, uid.offset, uid.length);
|
||||
return dirtyLocks[MathUtils.mod(hash, dirtyLocks.length)];
|
||||
|
|
|
@ -79,8 +79,9 @@ public final class DocumentFieldMappers extends ForwardingSet<FieldMapper<?>> {
|
|||
return new DocumentFieldMappers(fieldMappers, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer);
|
||||
}
|
||||
|
||||
// TODO: replace all uses of this with fullName, or change the meaning of name to be fullName
|
||||
public FieldMappers name(String name) {
|
||||
return fieldMappers.name(name);
|
||||
return fieldMappers.fullName(name);
|
||||
}
|
||||
|
||||
public FieldMappers indexName(String indexName) {
|
||||
|
|
|
@@ -103,7 +103,10 @@ public class FieldMappersLookup extends ForwardingSet<FieldMapper<?>> {

/** Create a new empty instance. */
public FieldMappersLookup() {
this(new CopyOnWriteHashSet<FieldMapper<?>>(), new MappersLookup(new CopyOnWriteHashMap<String, FieldMappers>(), new CopyOnWriteHashMap<String, FieldMappers>(), new CopyOnWriteHashMap<String, FieldMappers>()));
this(new CopyOnWriteHashSet<FieldMapper<?>>(),
new MappersLookup(new CopyOnWriteHashMap<String, FieldMappers>(),
new CopyOnWriteHashMap<String, FieldMappers>(),
new CopyOnWriteHashMap<String, FieldMappers>()));
}

private FieldMappersLookup(CopyOnWriteHashSet<FieldMapper<?>> mappers, MappersLookup lookup) {

@@ -130,13 +133,6 @@ public class FieldMappersLookup extends ForwardingSet<FieldMapper<?>> {
}
}

/**
* Returns the field mappers based on the mapper name.
*/
public FieldMappers name(String name) {
return lookup.name.get(name);
}

/**
* Returns the field mappers based on the mapper index name.
*/

@@ -152,7 +148,7 @@ public class FieldMappersLookup extends ForwardingSet<FieldMapper<?>> {
}

/**
* Returns a list of the index names of a simple match regex like pattern against full name, name and index name.
* Returns a list of the index names of a simple match regex like pattern against full name and index name.
*/
public List<String> simpleMatchToIndexNames(String pattern) {
List<String> fields = Lists.newArrayList();

@@ -161,15 +157,13 @@ public class FieldMappersLookup extends ForwardingSet<FieldMapper<?>> {
fields.add(fieldMapper.names().indexName());
} else if (Regex.simpleMatch(pattern, fieldMapper.names().indexName())) {
fields.add(fieldMapper.names().indexName());
} else if (Regex.simpleMatch(pattern, fieldMapper.names().name())) {
fields.add(fieldMapper.names().indexName());
}
}
return fields;
}

/**
* Returns a list of the full names of a simple match regex like pattern against full name, name and index name.
* Returns a list of the full names of a simple match regex like pattern against full name and index name.
*/
public List<String> simpleMatchToFullName(String pattern) {
List<String> fields = Lists.newArrayList();

@@ -178,16 +172,13 @@ public class FieldMappersLookup extends ForwardingSet<FieldMapper<?>> {
fields.add(fieldMapper.names().fullName());
} else if (Regex.simpleMatch(pattern, fieldMapper.names().indexName())) {
fields.add(fieldMapper.names().fullName());
} else if (Regex.simpleMatch(pattern, fieldMapper.names().name())) {
fields.add(fieldMapper.names().fullName());
}
}
return fields;
}

/**
* Tries to find first based on {@link #fullName(String)}, then by {@link #indexName(String)}, and last
* by {@link #name(String)}.
* Tries to find first based on {@link #fullName(String)}, then by {@link #indexName(String)}.
*/
@Nullable
public FieldMappers smartName(String name) {

@@ -195,16 +186,12 @@ public class FieldMappersLookup extends ForwardingSet<FieldMapper<?>> {
if (fieldMappers != null) {
return fieldMappers;
}
fieldMappers = indexName(name);
if (fieldMappers != null) {
return fieldMappers;
}
return name(name);
return indexName(name);
}

/**
* Tries to find first based on {@link #fullName(String)}, then by {@link #indexName(String)}, and last
* by {@link #name(String)} and return the first mapper for it (see {@link org.elasticsearch.index.mapper.FieldMappers#mapper()}).
* Tries to find first based on {@link #fullName(String)}, then by {@link #indexName(String)}
* and return the first mapper for it (see {@link org.elasticsearch.index.mapper.FieldMappers#mapper()}).
*/
@Nullable
public FieldMapper<?> smartNameFieldMapper(String name) {
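With the by-name fallback gone, field resolution in the lookup reduces to two steps: try the full name, then fall back to the index name. A minimal, self-contained sketch of that resolution order (FieldResolver and its maps are illustrative stand-ins, not types from this commit):

    import java.util.Map;

    // Hypothetical, simplified mirror of the two-step lookup order above.
    final class FieldResolver<T> {
        private final Map<String, T> byFullName;   // e.g. "name.first"
        private final Map<String, T> byIndexName;  // the name stored in the Lucene index

        FieldResolver(Map<String, T> byFullName, Map<String, T> byIndexName) {
            this.byFullName = byFullName;
            this.byIndexName = byIndexName;
        }

        /** Full name wins; the index name is the only remaining fallback. */
        T smartName(String name) {
            T result = byFullName.get(name);
            return result != null ? result : byIndexName.get(name);
        }
    }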
@@ -558,17 +558,6 @@ public class MapperService extends AbstractIndexComponent {
}
}

/**
* Returns {@link FieldMappers} for all the {@link FieldMapper}s that are registered
* under the given name across all the different {@link DocumentMapper} types.
*
* @param name The name to return all the {@link FieldMappers} for across all {@link DocumentMapper}s.
* @return All the {@link FieldMappers} for across all {@link DocumentMapper}s
*/
public FieldMappers name(String name) {
return fieldMappers.name(name);
}

/**
* Returns {@link FieldMappers} for all the {@link FieldMapper}s that are registered
* under the given indexName across all the different {@link DocumentMapper} types.

@@ -706,11 +695,7 @@ public class MapperService extends AbstractIndexComponent {
if (mappers != null) {
return mappers;
}
mappers = indexName(smartName);
if (mappers != null) {
return mappers;
}
return name(smartName);
return indexName(smartName);
}

public SmartNameFieldMappers smartName(String smartName, @Nullable String[] types) {

@@ -755,10 +740,6 @@ public class MapperService extends AbstractIndexComponent {
if (fieldMappers != null) {
return new SmartNameFieldMappers(this, fieldMappers, null, false);
}
fieldMappers = name(smartName);
if (fieldMappers != null) {
return new SmartNameFieldMappers(this, fieldMappers, null, false);
}
return null;
}
@@ -556,7 +556,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
if (!sourceMetaData.contains(existingFile) && !Store.isChecksum(existingFile)) {
try {
dir.deleteFile(reason, existingFile);
dir.deleteFile(existingFile);
} catch (Exception e) {
// ignore, we don't really care, will get deleted later on
}

@@ -666,8 +665,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
public final static class MetadataSnapshot implements Iterable<StoreFileMetaData>, Streamable {
private static final ESLogger logger = Loggers.getLogger(MetadataSnapshot.class);
private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8;
// we stopped writing legacy checksums in 1.3.0 so all segments here must use the new CRC32 version
private static final Version FIRST_ES_CRC32_VERSION = org.elasticsearch.Version.V_1_3_0.luceneVersion;

private Map<String, StoreFileMetaData> metadata;

@@ -714,7 +711,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
if (maxVersion.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
} else {
builder.put(segmentsFile, new StoreFileMetaData(segmentsFile, directory.fileLength(segmentsFile), legacyChecksum, maxVersion, hashFile(directory, segmentsFile)));
final BytesRefBuilder fileHash = new BytesRefBuilder();
final long length;
try (final IndexInput in = directory.openInput(segmentsFile, IOContext.READONCE)) {
length = in.length();
hashFile(fileHash, new InputStreamIndexInput(in, length), length);
}
builder.put(segmentsFile, new StoreFileMetaData(segmentsFile, length, legacyChecksum, maxVersion, fileHash.get()));
}
} catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
// we either know the index is corrupted or it's just not there

@@ -799,14 +802,16 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
final String checksum;
final BytesRefBuilder fileHash = new BytesRefBuilder();
try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
final long length;
try {
if (in.length() < CodecUtil.footerLength()) {
length = in.length();
if (length < CodecUtil.footerLength()) {
// truncated files trigger IAE if we seek negative... these files are really corrupted though
throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
}
if (readFileAsHash) {
final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in); // additional safety we checksum the entire file we read the hash for...
hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, in.length()), in.length());
hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length);
checksum = digestToString(verifyingIndexInput.verify());
} else {
checksum = digestToString(CodecUtil.retrieveChecksum(in));

@@ -816,7 +821,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
logger.debug("Can retrieve checksum from file [{}]", ex, file);
throw ex;
}
builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), checksum, version, fileHash.get()));
builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));
}
}
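The rewritten legacy branch opens the segments file once, remembers its length, and reuses that same length for both the hash and the resulting StoreFileMetaData rather than asking the directory for the file length a second time. A rough, stand-alone illustration of the single-pass idea using plain java.io, with CRC32 standing in for the Lucene checksum machinery (which this commit does not touch):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.zip.CRC32;

    final class SinglePassChecksum {
        /** Reads the stream once, returning {checksum, length} so both values come from the same pass. */
        static long[] checksumAndLength(InputStream in) throws IOException {
            CRC32 crc = new CRC32();
            byte[] buffer = new byte[8192];
            long length = 0;
            int read;
            while ((read = in.read(buffer)) != -1) {
                crc.update(buffer, 0, read);
                length += read; // the same length is later reused for the metadata entry
            }
            return new long[] { crc.getValue(), length };
        }
    }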
@@ -19,6 +19,9 @@

package org.elasticsearch.indices.recovery;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.IndexOutput;
import org.elasticsearch.ElasticsearchException;

@@ -398,6 +401,14 @@ public class RecoveryTarget extends AbstractComponent {
Store.MetadataSnapshot sourceMetaData = request.sourceMetaSnapshot();
try {
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
// this is a fatal exception at this stage.
// this means we transferred files from the remote that have not be checksummed and they are
// broken. We have to clean up this shard entirely, remove all files and bubble it up to the
// source shard since this index might be broken there as well? The Source can handle this and checks
// its content on disk if possible.
store.deleteContent(); // clean up and delete all files
throw new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
} catch (Exception ex) {
throw new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
}
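The clean-files handler now separates two failure modes: corruption-type exceptions are fatal, so the shard's content is wiped before the failure is reported, while any other exception is simply wrapped and rethrown. The same shape in isolation, with placeholder types standing in for Store, CorruptIndexException and RecoveryFailedException:

    final class CleanupOnCorruption {
        interface Shard {
            void cleanupAndVerify() throws Exception;
            void deleteContent() throws Exception;
        }

        static void cleanFiles(Shard shard) throws Exception {
            try {
                shard.cleanupAndVerify();
            } catch (java.io.IOException corruption) { // stand-in for the corruption exception types
                shard.deleteContent();                 // the local copy is unrecoverable
                throw new IllegalStateException("failed to clean after recovery", corruption);
            } catch (Exception other) {
                throw new IllegalStateException("failed to clean after recovery", other);
            }
        }
    }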
@@ -24,6 +24,7 @@ import com.google.common.collect.Lists;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService;

@@ -41,6 +42,7 @@ import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.ArrayUtils;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

@@ -62,6 +64,9 @@ import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

@@ -345,13 +350,49 @@ public final class ShardRecoveryHandler implements Engine.RecoveryHandler {
// Once the files have been renamed, any other files that are not
// related to this recovery (out of date segments, for example)
// are deleted
try {
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata),
TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()),
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
} catch (RemoteTransportException remoteException) {
final IOException corruptIndexException;
// we realized that after the index was copied and we wanted to finalize the recovery
// the index was corrupted:
// - maybe due to a broken segments file on an empty index (transferred with no checksum)
// - maybe due to old segments without checksums or length only checks
if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) {
try {
final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot);
StoreFileMetaData[] metadata = Iterables.toArray(recoverySourceMetadata, StoreFileMetaData.class);
ArrayUtil.timSort(metadata, new Comparator<StoreFileMetaData>() {
@Override
public int compare(StoreFileMetaData o1, StoreFileMetaData o2) {
return Long.compare(o1.length(), o2.length()); // check small files first
}
});
for (StoreFileMetaData md : metadata) {
logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail!
logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
throw corruptIndexException;
}
}
} catch (IOException ex) {
remoteException.addSuppressed(ex);
throw remoteException;
}
// corruption has happened on the way to replica
RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
exception.addSuppressed(remoteException);
logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
corruptIndexException, shard.shardId(), request.targetNode());
} else {
throw remoteException;
}
}
}
});

stopWatch.stop();
logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
response.phase1Time = stopWatch.totalTime().millis();
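When the remote side reports corruption, the primary re-verifies its own files before deciding who is broken, and it orders that work so the cheapest checks run first. A tiny sketch of the same ordering with a placeholder FileMeta class (StoreFileMetaData in the commit), written in the pre-Java-8 style the surrounding code uses:

    import java.util.Arrays;
    import java.util.Comparator;

    final class FileMeta {
        final String name;
        final long length;

        FileMeta(String name, long length) {
            this.name = name;
            this.length = length;
        }

        /** Sorts so the smallest files are integrity-checked first. */
        static void sortSmallestFirst(FileMeta[] files) {
            Arrays.sort(files, new Comparator<FileMeta>() {
                @Override
                public int compare(FileMeta o1, FileMeta o2) {
                    return Long.compare(o1.length, o2.length);
                }
            });
        }
    }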
@@ -170,7 +170,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
}

// should be optimized with the hasTTL flag
FieldMappers ttlFieldMappers = indexService.mapperService().name(TTLFieldMapper.NAME);
FieldMappers ttlFieldMappers = indexService.mapperService().fullName(TTLFieldMapper.NAME);
if (ttlFieldMappers == null) {
continue;
}
@@ -56,6 +56,8 @@ public class RestGetIndexTemplateAction extends BaseRestHandler {

GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names);
getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local()));
getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout()));

getIndexTemplatesRequest.listenerThreaded(false);

final boolean implicitAll = getIndexTemplatesRequest.names().length == 0;
@@ -46,6 +46,7 @@ public class RestHeadIndexTemplateAction extends BaseRestHandler {
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(request.param("name"));
getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local()));
getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout()));
client.admin().indices().getTemplates(getIndexTemplatesRequest, new RestResponseListener<GetIndexTemplatesResponse>(channel) {
@Override
public RestResponse buildResponse(GetIndexTemplatesResponse getIndexTemplatesResponse) {
@@ -115,7 +115,7 @@ public class HistogramParser implements Aggregator.Parser {
}
}

if (interval < 0) {
if (interval < 1) {
throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
}
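The tightened check rejects an interval of zero as well as a negative one, since any histogram bucket width below 1 is meaningless. A minimal sketch of the same guard, using IllegalArgumentException as a stand-in for the SearchParseException thrown in the parser:

    final class IntervalValidation {
        static long requireValidInterval(long interval, String aggregationName) {
            if (interval < 1) { // 0 and negative values are both rejected
                throw new IllegalArgumentException(
                        "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
            }
            return interval;
        }
    }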
@@ -134,6 +134,10 @@ public abstract class BaseDateTime
super();
iChronology = checkChronology(chronology);
iMillis = checkInstant(instant, iChronology);
// validate not over maximum
if (iChronology.year().isSupported()) {
iChronology.year().set(iMillis, iChronology.year().get(iMillis));
}
}

//-----------------------------------------------------------------------
@@ -41,6 +41,7 @@ export ES_HEAP_SIZE
export ES_HEAP_NEWSIZE
export ES_DIRECT_SIZE
export ES_JAVA_OPTS
export ES_GC_LOG_FILE
export JAVA_HOME

lockfile=/var/lock/subsys/$prog

@@ -84,6 +85,8 @@ start() {
mkdir -p "$WORK_DIR"
chown "$ES_USER":"$ES_GROUP" "$WORK_DIR"
fi
export ES_GC_LOG_FILE

echo -n $"Starting $prog: "
# if not running, start it up here, usually something like "daemon $exec"
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.work=$WORK_DIR -Des.default.path.conf=$CONF_DIR
@@ -44,3 +44,6 @@ ES_USER=elasticsearch

# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#RESTART_ON_UPGRADE=true

# Path to the GC log file
#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
@@ -19,11 +19,15 @@

package org.elasticsearch.action.admin.indices.segments;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.test.ElasticsearchSingleNodeTest;
import org.junit.Before;
import org.junit.Test;

import java.util.List;

@@ -56,4 +60,33 @@ public class IndicesSegmentsRequestTests extends ElasticsearchSingleNodeTest {
List<Segment> segments = rsp.getIndices().get("test").iterator().next().getShards()[0].getSegments();
assertNotNull(segments.get(0).ramTree);
}

/**
* with the default IndicesOptions inherited from BroadcastOperationRequest this will raise an exception
*/
@Test(expected=org.elasticsearch.indices.IndexClosedException.class)
public void testRequestOnClosedIndex() {
client().admin().indices().prepareClose("test").get();
client().admin().indices().prepareSegments("test").get();
}

/**
* setting the "ignoreUnavailable" option prevents IndexClosedException
*/
public void testRequestOnClosedIndexIgnoreUnavailable() {
client().admin().indices().prepareClose("test").get();
IndicesOptions defaultOptions = new IndicesSegmentsRequest().indicesOptions();
IndicesOptions testOptions = IndicesOptions.fromOptions(true, true, true, false, defaultOptions);
IndicesSegmentResponse rsp = client().admin().indices().prepareSegments("test").setIndicesOptions(testOptions).get();
assertEquals(0, rsp.getIndices().size());
}

/**
* by default IndicesOptions setting IndicesSegmentsRequest should not throw exception when no index present
*/
public void testAllowNoIndex() {
client().admin().indices().prepareDelete("test").get();
IndicesSegmentResponse rsp = client().admin().indices().prepareSegments().get();
assertEquals(0, rsp.getIndices().size());
}
}
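The lenient variants of these tests derive their options from the request's own defaults; the five-argument fromOptions call used above takes, in order, flags for ignoring unavailable indices, allowing no matching indices, and expanding wildcards to open and closed indices, plus the defaults to inherit everything else from. A compact sketch of that derivation on its own:

    import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
    import org.elasticsearch.action.support.IndicesOptions;

    final class LenientSegmentsOptions {
        // Mirrors the tests above: start from the request defaults and flip the
        // leniency flags so a closed or missing index is skipped, not fatal.
        static IndicesOptions lenient() {
            IndicesOptions defaults = new IndicesSegmentsRequest().indicesOptions();
            return IndicesOptions.fromOptions(true, true, true, false, defaults);
        }
    }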
@@ -102,7 +102,7 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest {
getResult = client().prepareGet("test", "type1", "1").setOperationThreaded(false).execute().actionGet();
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
assertThat("cycle(map) #" + i, (String) ((Map) getResult.getSourceAsMap().get("type1")).get("name"), equalTo("test"));
assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(true)).actionGet();
assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));

@@ -110,11 +110,11 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest {

logger.info("Get [type1/1] with script");
for (int i = 0; i < 5; i++) {
getResult = client().prepareGet("test", "type1", "1").setFields("type1.name").execute().actionGet();
getResult = client().prepareGet("test", "type1", "1").setFields("name").execute().actionGet();
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
assertThat(getResult.isExists(), equalTo(true));
assertThat(getResult.getSourceAsBytes(), nullValue());
assertThat(getResult.getField("type1.name").getValues().get(0).toString(), equalTo("test"));
assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
}

logger.info("Get [type1/2] (should be empty)");

@@ -321,6 +321,6 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest {
}

private XContentBuilder source(String id, String nameValue) throws IOException {
return XContentFactory.jsonBuilder().startObject().startObject("type1").field("id", id).field("name", nameValue).endObject().endObject();
return XContentFactory.jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
}
}
@@ -53,8 +53,7 @@ public class SimpleMapperTests extends ElasticsearchSingleNodeTest {
BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
Document doc = docMapper.parse("person", "1", json).rootDoc();

assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
assertThat(docMapper.mappers().name("first").mapper().names().fullName(), equalTo("name.first"));
assertThat(doc.get(docMapper.mappers().name("name.first").mapper().names().indexName()), equalTo("shay"));
// System.out.println("Document: " + doc);
// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
doc = docMapper.parse(json).rootDoc();

@@ -74,7 +73,7 @@ public class SimpleMapperTests extends ElasticsearchSingleNodeTest {
BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
Document doc = builtDocMapper.parse(json).rootDoc();
assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
assertThat(doc.get(docMapper.mappers().name("name.first").mapper().names().indexName()), equalTo("shay"));
// System.out.println("Document: " + doc);
// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
}

@@ -89,7 +88,7 @@ public class SimpleMapperTests extends ElasticsearchSingleNodeTest {
BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
Document doc = docMapper.parse(json).rootDoc();
assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
assertThat(doc.get(docMapper.mappers().name("name.first").mapper().names().indexName()), equalTo("shay"));
// System.out.println("Document: " + doc);
// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
}

@@ -101,7 +100,7 @@ public class SimpleMapperTests extends ElasticsearchSingleNodeTest {
BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json"));
Document doc = docMapper.parse("person", "1", json).rootDoc();
assertThat(doc.get(docMapper.uidMapper().names().indexName()), equalTo(Uid.createUid("person", "1")));
assertThat(doc.get(docMapper.mappers().name("first").mapper().names().indexName()), equalTo("shay"));
assertThat(doc.get(docMapper.mappers().name("name.first").mapper().names().indexName()), equalTo("shay"));
// System.out.println("Document: " + doc);
// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
}
@@ -20,6 +20,7 @@ package org.elasticsearch.index.store;

import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
import com.carrotsearch.randomizedtesting.LifecycleScope;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.google.common.base.Charsets;
import com.google.common.base.Predicate;

@@ -82,6 +83,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;

@@ -298,6 +300,65 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
assertThat(corruptedFile, notNullValue());
}

/**
* This test triggers a corrupt index exception during finalization size if an empty commit point is transferred
* during recovery we don't know the version of the segments_N file because it has no segments we can take it from.
* This simulates recoveries from old indices or even without checksums and makes sure if we fail during finalization
* we also check if the primary is ok. Without the relevant checks this test fails with a RED cluster
*/
public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionException, InterruptedException, IOException {
internalCluster().ensureAtLeastNumDataNodes(2);
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
List<NodeStats> dataNodeStats = new ArrayList<>();
for (NodeStats stat : nodeStats.getNodes()) {
if (stat.getNode().isDataNode()) {
dataNodeStats.add(stat);
}
}

assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
Collections.shuffle(dataNodeStats, getRandom());
NodeStats primariesNode = dataNodeStats.get(0);
NodeStats unluckyNode = dataNodeStats.get(1);
assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true)
.put("index.routing.allocation.include._name", primariesNode.getNode().name())
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)

));
ensureGreen(); // allocated with empty commit
final AtomicBoolean corrupt = new AtomicBoolean(true);
final CountDownLatch hasCorrupted = new CountDownLatch(1);
for (NodeStats dataNode : dataNodeStats) {
MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, unluckyNode.getNode().name()).localNode(), new MockTransportService.DelegateTransport(mockTransportService.original()) {

@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
if (corrupt.get() && action.equals(RecoveryTarget.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
byte[] array = req.content().array();
int i = randomIntBetween(0, req.content().length() - 1);
array[i] = (byte) ~array[i]; // flip one byte in the content
hasCorrupted.countDown();
}
super.sendRequest(node, requestId, action, request, options);
}
});
}

Settings build = ImmutableSettings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
.put("index.routing.allocation.include._name", primariesNode.getNode().name() + "," + unluckyNode.getNode().name()).build();
client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
client().admin().cluster().prepareReroute().get();
hasCorrupted.await();
corrupt.set(false);
ensureGreen();
}

/**
* Tests corruption that happens on the network layer and that the primary does not get affected by corruption that happens on the way
* to the replica. The file on disk stays uncorrupted
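The test only has to flip a single byte of a file chunk because any one-byte change is guaranteed to alter the file's checksum, which is what the receiving shard verifies. A stand-alone illustration with plain CRC32 (the test itself relies on Lucene's checksums, not this class):

    import java.util.zip.CRC32;

    final class FlipOneByte {
        public static void main(String[] args) {
            byte[] payload = "an example file chunk".getBytes();
            long before = crc(payload);
            payload[3] = (byte) ~payload[3]; // the same kind of corruption the test injects
            long after = crc(payload);
            System.out.println(before != after); // prints true: the corruption is detectable
        }

        private static long crc(byte[] bytes) {
            CRC32 crc = new CRC32();
            crc.update(bytes);
            return crc.getValue();
        }
    }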
@@ -362,7 +362,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest
verify(count(indices), false);
verify(clearCache(indices), false);
verify(_flush(indices),false);
verify(segments(indices), true);
verify(segments(indices), false);
verify(stats(indices), false);
verify(optimize(indices), false);
verify(refresh(indices), false);

@@ -437,7 +437,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest
verify(count(indices), false, 1);
verify(clearCache(indices), false);
verify(_flush(indices),false);
verify(segments(indices), true);
verify(segments(indices), false);
verify(stats(indices), false);
verify(optimize(indices), false);
verify(refresh(indices), false);
@@ -33,12 +33,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.*;

/**
*
*/
public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {

@Test
public void getMappingsWhereThereAreNone() {
createIndex("index");
ensureYellow();

@@ -56,7 +52,6 @@ public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {
.endObject().endObject().endObject();
}

@Test
public void simpleGetFieldMappings() throws Exception {

assertAcked(prepareCreate("indexa")

@@ -80,55 +75,54 @@ public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {
assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());

// Get mappings by name
response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "subfield").get();
response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());

// get mappings by name across multiple indices
response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "subfield"), nullValue());
assertThat(response.fieldMappings("indexb", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexb", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "subfield"), nullValue());
response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("obj.subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield"), nullValue());
assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue());

// get mappings by name across multiple types
response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("obj.subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
assertThat(response.fieldMappings("indexa", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
assertThat(response.fieldMappings("indexb", "typeA", "subfield"), nullValue());
assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield"), nullValue());
assertThat(response.fieldMappings("indexb", "typeA", "field1"), nullValue());
assertThat(response.fieldMappings("indexb", "typeB", "subfield"), nullValue());
assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue());
assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());

// get mappings by name across multiple types & indices
response = client().admin().indices().prepareGetFieldMappings().setFields("subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
response = client().admin().indices().prepareGetFieldMappings().setFields("obj.subfield").get();
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
assertThat(response.fieldMappings("indexa", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
assertThat(response.fieldMappings("indexb", "typeA", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexb", "typeA", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
assertThat(response.fieldMappings("indexb", "typeB", "subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield"));
assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());

}

@SuppressWarnings("unchecked")
@Test
public void simpleGetFieldMappingsWithDefaults() throws Exception {
assertAcked(prepareCreate("test").addMapping("type", getMappingForType("type")));

@@ -136,20 +130,19 @@ public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {
ensureYellow();
waitForConcreteMappingsOnAll("test", "type", "num"); // for num, we need to wait...

GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().setFields("num", "field1", "subfield").includeDefaults(true).get();
GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().setFields("num", "field1", "obj.subfield").includeDefaults(true).get();

assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("index", (Object) "not_analyzed"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("type", (Object) "long"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("index", (Object) "analyzed"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("type", (Object) "string"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "subfield").sourceAsMap().get("subfield"), hasEntry("index", (Object) "not_analyzed"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "string"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("index", (Object) "not_analyzed"));
assertThat((Map<String, Object>) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "string"));


}

//fix #6552
@Test
public void simpleGetFieldMappingsWithPretty() throws Exception {
assertAcked(prepareCreate("index").addMapping("type", getMappingForType("type")));
Map<String, String> params = Maps.newHashMap();
@@ -19,6 +19,7 @@

package org.elasticsearch.nested;

import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.search.Explanation;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;

@@ -101,11 +102,11 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
assertThat(searchResponse.getHits().totalHits(), equalTo(0l));

// now, do a nested query
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("n_field1", "n_value1_1"))).get();
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).get();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

@@ -130,19 +131,19 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
assertDocumentCount("test", 6);

searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
boolQuery().must(termQuery("n_field1", "n_value1_1")).must(termQuery("n_field2", "n_value2_1")))).execute().actionGet();
boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

// filter
searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), nestedFilter("nested1",
boolQuery().must(termQuery("n_field1", "n_value1_1")).must(termQuery("n_field2", "n_value2_1"))))).execute().actionGet();
boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1"))))).execute().actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

// check with type prefix
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
boolQuery().must(termQuery("n_field1", "n_value1_1")).must(termQuery("n_field2", "n_value2_1")))).execute().actionGet();
boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

@@ -154,11 +155,11 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
flush();
assertDocumentCount("test", 3);

searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("n_field1", "n_value1_1"))).execute().actionGet();
searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).execute().actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

searchResponse = client().prepareSearch("test").setTypes("type1", "type2").setQuery(nestedQuery("nested1", termQuery("n_field1", "n_value1_1"))).execute().actionGet();
searchResponse = client().prepareSearch("test").setTypes("type1", "type2").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).execute().actionGet();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
}

@@ -1211,7 +1212,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {

// only when querying with nested the fixed bitsets are loaded
SearchResponse searchResponse = client().prepareSearch("test")
.setQuery(nestedQuery("array1", termQuery("field1", "value1")))
.setQuery(nestedQuery("array1", termQuery("array1.field1", "value1")))
.get();
assertNoFailures(searchResponse);
assertThat(searchResponse.getHits().totalHits(), equalTo(5l));
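The convention these updated tests follow is that a field living inside a nested object is always addressed by its full path, never by the bare leaf name. A minimal sketch of a query built that way (field and value names taken from the tests above):

    import org.elasticsearch.index.query.QueryBuilder;

    import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
    import static org.elasticsearch.index.query.QueryBuilders.termQuery;

    final class NestedFieldNaming {
        static QueryBuilder byFullPath() {
            // "nested1.n_field1", not the bare leaf name "n_field1"
            return nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"));
        }
    }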
@@ -1748,10 +1748,10 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
// The previous percolate request introduced the custom.color field, so now we register the query again
// and the field name `color` will be resolved to `custom.color` field in mapping via smart field mapping resolving.
client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1")
.setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("color:red")).endObject())
.setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("custom.color:red")).endObject())
.get();
client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2")
.setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("color:blue")).field("type", "type").endObject())
.setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("custom.color:blue")).field("type", "type").endObject())
.get();

// The second request will yield a match, since the query during the proper field during parsing.
@@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.ImmutableSettings;

@@ -43,6 +44,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

@@ -52,8 +54,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.*;
import static org.hamcrest.core.IsNull.notNullValue;

/**

@@ -1348,4 +1349,18 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest {
assertThat(((DateTime) bucket.getKey()), equalTo(key));
assertThat(bucket.getDocCount(), equalTo(3l));
}

/**
* see issue #9634, negative interval in date_histogram should raise exception
*/
public void testExeptionOnNegativerInterval() {
try {
client().prepareSearch("idx")
.addAggregation(dateHistogram("histo").field("date").interval(-TimeUnit.DAYS.toMillis(1)).minDocCount(0)).execute()
.actionGet();
fail();
} catch (SearchPhaseExecutionException e) {
assertThat(e.getMessage(), containsString("IllegalArgumentException"));
}
}
}
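Both new negative-interval tests use the try/fail/catch idiom rather than @Test(expected=...) because they also want to inspect the exception's message. The idiom on its own, reduced to a reusable helper (hypothetical, not part of the commit):

    import static org.junit.Assert.fail;

    final class ExpectWithMessage {
        /** Runs the action and asserts that it throws an exception whose message contains the fragment. */
        static void expectMessage(Runnable action, String expectedFragment) {
            try {
                action.run();
                fail("expected an exception containing: " + expectedFragment);
            } catch (RuntimeException e) {
                if (e.getMessage() == null || !e.getMessage().contains(expectedFragment)) {
                    throw e; // a different failure: let it surface
                }
            }
        }
    }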
@@ -20,7 +20,9 @@ package org.elasticsearch.search.aggregations.bucket;

import com.carrotsearch.hppc.LongOpenHashSet;

import org.apache.tools.ant.filters.TokenFilter.ContainsString;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;

@@ -49,6 +51,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;

@@ -992,4 +995,17 @@ public class HistogramTests extends ElasticsearchIntegrationTest {
}
}

/**
* see issue #9634, negative interval in histogram should raise exception
*/
public void testExeptionOnNegativerInterval() {
try {
client().prepareSearch("empty_bucket_idx")
.addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).execute().actionGet();
fail();
} catch (SearchPhaseExecutionException e) {
assertThat(e.getMessage(), containsString("Missing required field [interval]"));
}
}

}
@@ -565,7 +565,7 @@ public class ReverseNestedTests extends ElasticsearchIntegrationTest {
reverseNested("to_root").subAggregation(
nested("nested_1").path("sku").subAggregation(
filter("filter_by_sku").filter(termFilter("sku.sku_type", "bar1")).subAggregation(
count("sku_count").field("sku_type")
count("sku_count").field("sku.sku_type")
)
)
)

@@ -603,7 +603,7 @@ public class ReverseNestedTests extends ElasticsearchIntegrationTest {
nested("nested_2").path("sku.colors").subAggregation(
filter("filter_sku_color").filter(termFilter("sku.colors.name", "red")).subAggregation(
reverseNested("reverse_to_sku").path("sku").subAggregation(
count("sku_count").field("sku_type")
count("sku_count").field("sku.sku_type")
)
)
)
@@ -46,6 +46,7 @@ import org.junit.Test;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;

@@ -778,13 +779,13 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
@Test
public void testNestedFetchFeatures() {
String hlType = randomFrom("plain", "fvh", "postings");
HighlightBuilder.Field hlField = new HighlightBuilder.Field("message")
HighlightBuilder.Field hlField = new HighlightBuilder.Field("comments.message")
.highlightQuery(matchQuery("comments.message", "comment"))
.forceSource(randomBoolean()) // randomly from stored field or _source
.highlighterType(hlType);

SearchResponse searchResponse = client().prepareSearch("articles")
.setQuery(nestedQuery("comments", matchQuery("message", "comment").queryName("test")))
.setQuery(nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test")))
.addAggregation(
nested("to-comments")
.path("comments")

@@ -811,7 +812,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments"));
assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0));

HighlightField highlightField = searchHit.getHighlightFields().get("message");
HighlightField highlightField = searchHit.getHighlightFields().get("comments.message");
assertThat(highlightField.getFragments().length, equalTo(1));
assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>comment</em>"));

@@ -849,7 +850,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
nested("to-comments")
.path("comments")
.subAggregation(topHits("comments")
.addHighlightedField(new HighlightBuilder.Field("message").highlightQuery(matchQuery("comments.message", "text")))
.addHighlightedField(new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")))
.addSort("comments.id", SortOrder.ASC))
)
)

@@ -872,7 +873,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0));
assertThat((Integer) searchHits.getAt(j).sourceAsMap().get("id"), equalTo(0));

HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("message");
HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message");
assertThat(highlightField.getFragments().length, equalTo(1));
assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>text</em>"));
}
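Highlighting follows the same rule as the queries: the highlight field is registered under the full nested path and later looked up under that same key. A small sketch of building such a field (names taken from the test above):

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.highlight.HighlightBuilder;

    final class NestedHighlightField {
        static HighlightBuilder.Field commentsMessage() {
            // registered and later retrieved as "comments.message"
            return new HighlightBuilder.Field("comments.message")
                    .highlightQuery(QueryBuilders.matchQuery("comments.message", "comment"));
        }
    }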
@@ -98,13 +98,12 @@ public class ExistsMissingTests extends ElasticsearchIntegrationTest {

final Map<String, Integer> expected = new LinkedHashMap<String, Integer>();
expected.put("foo", 1);
expected.put("f*", 2); // foo and bar.foo, that's how the expansion works
expected.put("f*", 1);
expected.put("bar", 2);
expected.put("bar.*", 2);
expected.put("bar.foo", 1);
expected.put("bar.bar", 1);
expected.put("bar.bar.bar", 1);
expected.put("baz", 1);
expected.put("foobar", 0);

ensureYellow("idx");
@@ -510,7 +510,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
assertThat(putMappingResponse.isAcknowledged(), is(true));

SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
new CompletionSuggestionBuilder("suggs").field("suggest").text("f").size(10)
new CompletionSuggestionBuilder("suggs").field(FIELD + ".suggest").text("f").size(10)
).execute().actionGet();
assertSuggestions(suggestResponse, "suggs");

@@ -553,7 +553,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
assertThat(putMappingResponse.isAcknowledged(), is(true));

SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
SuggestBuilders.completionSuggestion("suggs").field("suggest").text("f").size(10)
SuggestBuilders.completionSuggestion("suggs").field(FIELD + ".suggest").text("f").size(10)
).execute().actionGet();
assertSuggestions(suggestResponse, "suggs");
@ -16,10 +16,9 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.transport.netty;
|
||||
package org.elasticsearch.test.junit.rule;
|
||||
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.transport.BindTransportException;
|
||||
import org.junit.rules.TestRule;
|
||||
import org.junit.runner.Description;
|
||||
import org.junit.runners.model.Statement;
|
||||
|
@ -27,20 +26,28 @@ import org.junit.runners.model.Statement;
|
|||
/**
|
||||
* A helper rule to catch all BindTransportExceptions
|
||||
* and rerun the test for a configured number of times
|
||||
*
|
||||
* Note: Be aware, that when a test is repeated, the @After and @Before
|
||||
* annotated methods are not run a second time
|
||||
*
|
||||
*/
|
||||
public class RepeatOnBindExceptionRule implements TestRule {
|
||||
public class RepeatOnExceptionRule implements TestRule {
|
||||
|
||||
private ESLogger logger;
|
||||
private int retryCount;
|
||||
private Class expectedException;
|
||||
|
||||
/**
|
||||
*
|
||||
* @param logger the es logger from the test class
|
||||
* @param retryCount number of amounts to try a single test before failing
|
||||
* @param expectedException The exception class you want to catch
|
||||
*
|
||||
*/
|
||||
public RepeatOnBindExceptionRule(ESLogger logger, int retryCount) {
|
||||
public RepeatOnExceptionRule(ESLogger logger, int retryCount, Class expectedException) {
|
||||
this.logger = logger;
|
||||
this.retryCount = retryCount;
|
||||
this.expectedException = expectedException;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -55,9 +62,11 @@ public class RepeatOnBindExceptionRule implements TestRule {
                try {
                    base.evaluate();
                    return;
                } catch (BindTransportException t) {
                } catch (Throwable t) {
                    if (t.getClass().equals(expectedException)) {
                        caughtThrowable = t;
                        logger.info("Bind exception occurred, rerunning the test after [{}] failures", t, i+1);
                        logger.info("Exception [{}] occurred, rerunning the test after [{}] failures", t, t.getClass().getSimpleName(), i+1);
                    }
                }
            }
            logger.error("Giving up after [{}] failures... marking test as failed", retryCount);
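The hunk shows only the retry loop's catch block. For context, a sketch of the full apply() structure such a rule implies: the surrounding Statement wrapper follows the standard JUnit TestRule pattern, and the re-throw of unexpected exceptions is an assumption, since that branch is not part of this diff.

    @Override
    public Statement apply(final Statement base, Description description) {
        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                Throwable caughtThrowable = null;
                for (int i = 0; i < retryCount; i++) {
                    try {
                        base.evaluate();
                        return; // test passed, no retry needed
                    } catch (Throwable t) {
                        if (t.getClass().equals(expectedException)) {
                            caughtThrowable = t;
                            logger.info("Exception [{}] occurred, rerunning the test after [{}] failures",
                                    t, t.getClass().getSimpleName(), i + 1);
                        } else {
                            throw t; // assumption: unexpected exceptions fail the test immediately
                        }
                    }
                }
                logger.error("Giving up after [{}] failures... marking test as failed", retryCount);
                throw caughtThrowable;
            }
        };
    }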
@ -25,10 +25,8 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import com.carrotsearch.randomizedtesting.annotations.TestGroup;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import com.google.common.collect.Lists;

import org.apache.lucene.util.AbstractRandomizedTest;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

@ -38,6 +36,7 @@ import org.elasticsearch.test.rest.client.RestException;
import org.elasticsearch.test.rest.parser.RestTestParseException;
import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
import org.elasticsearch.test.rest.section.*;
import org.elasticsearch.test.rest.spec.RestApi;
import org.elasticsearch.test.rest.spec.RestSpec;
import org.elasticsearch.test.rest.support.FileUtils;
import org.junit.AfterClass;

@ -45,7 +44,6 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
@ -74,6 +72,10 @@ public class ElasticsearchRestTests extends ElasticsearchIntegrationTest {
     * e.g. -Dtests.rest.blacklist=get/10_basic/*
     */
    public static final String REST_TESTS_BLACKLIST = "tests.rest.blacklist";
    /**
     * Property that allows to control whether spec validation is enabled or not (default true).
     */
    public static final String REST_TESTS_VALIDATE_SPEC = "tests.rest.validate_spec";
    /**
     * Property that allows to control where the REST spec files need to be loaded from
     */
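By analogy with the -Dtests.rest.blacklist example documented just above, the new property can presumably be switched off on the test command line; the exact flag usage below is an assumption that mirrors that documented convention:

    -Dtests.rest.validate_spec=false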
@ -185,9 +187,30 @@ public class ElasticsearchRestTests extends ElasticsearchIntegrationTest {
    public static void initExecutionContext() throws IOException, RestException {
        String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH);
        RestSpec restSpec = RestSpec.parseFrom(DEFAULT_SPEC_PATH, specPaths);
        validateSpec(restSpec);
        restTestExecutionContext = new RestTestExecutionContext(restSpec);
    }

    private static void validateSpec(RestSpec restSpec) {
        boolean validateSpec = RandomizedTest.systemPropertyAsBoolean(REST_TESTS_VALIDATE_SPEC, true);
        if (validateSpec) {
            StringBuilder errorMessage = new StringBuilder();
            for (RestApi restApi : restSpec.getApis()) {
                if (restApi.getMethods().contains("GET") && restApi.isBodySupported()) {
                    if (!restApi.getMethods().contains("POST")) {
                        errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support POST");
                    }
                    if (!restApi.getParams().contains("source")) {
                        errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support the source query string parameter");
                    }
                }
            }
            if (errorMessage.length() > 0) {
                throw new IllegalArgumentException(errorMessage.toString());
            }
        }
    }

    @AfterClass
    public static void close() {
        if (restTestExecutionContext != null) {
@ -27,6 +27,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Map;

/**
@ -46,6 +47,10 @@ public class RestSpec {
        return restApiMap.get(api);
    }

    public Collection<RestApi> getApis() {
        return restApiMap.values();
    }

    /**
     * Parses the complete set of REST spec available under the provided directories
     */
@ -56,14 +61,6 @@ public class RestSpec {
                try (InputStream stream = Files.newInputStream(jsonFile)) {
                    XContentParser parser = JsonXContent.jsonXContent.createParser(stream);
                    RestApi restApi = new RestApiParser().parse(parser);
                    if (restApi.getMethods().contains("GET") && restApi.isBodySupported()) {
                        if (!restApi.getMethods().contains("POST")) {
                            throw new IllegalArgumentException(restApi.getName() + " supports GET with a body but doesn't support POST");
                        }
                        if (!restApi.getParams().contains("source")) {
                            throw new IllegalArgumentException(restApi.getName() + " supports GET with a body but doesn't support the source query string parameter");
                        }
                    }
                    restSpec.addApi(restApi);
                } catch (Throwable ex) {
                    throw new IOException("Can't parse rest spec file: [" + jsonFile + "]", ex);
@ -34,7 +34,7 @@ import java.util.List;
 */
public final class Features {

    private static final List<String> SUPPORTED = Lists.newArrayList("gtelte", "stash_in_path");
    private static final List<String> SUPPORTED = Lists.newArrayList("gtelte", "stash_in_path", "groovy_scripting");

    private Features() {
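The SUPPORTED list is what skip sections in the REST test suites are matched against, so adding "groovy_scripting" presumably lets suites declare that they need Groovy scripting support. A hedged sketch of such a check; the areAllSupported method name is an assumption and does not appear in this hunk:

    // Illustrative only: a test section would be skipped unless every required feature is supported
    if (!Features.areAllSupported(Arrays.asList("gtelte", "groovy_scripting"))) {
        // skip the section
    }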
@ -30,9 +30,10 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.test.junit.rule.RepeatOnExceptionRule;
import org.elasticsearch.test.cache.recycler.MockBigArrays;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.elasticsearch.transport.BindTransportException;
import org.junit.Rule;
import org.junit.Test;
@ -47,23 +48,10 @@ import static org.hamcrest.Matchers.is;

public class NettyTransportMultiPortTests extends ElasticsearchTestCase {

    public static int MAX_RETRIES = 10;
    private static final int MAX_RETRIES = 10;

    @Rule
    public RepeatOnBindExceptionRule repeatOnBindExceptionRule = new RepeatOnBindExceptionRule(logger, MAX_RETRIES);

    private NettyTransport nettyTransport;
    private ThreadPool threadPool;

    @After
    public void shutdownNettyTransport() {
        if (nettyTransport != null) {
            nettyTransport.stop();
        }
        if (threadPool != null) {
            threadPool.shutdownNow();
        }
    }
    public RepeatOnExceptionRule repeatOnBindExceptionRule = new RepeatOnExceptionRule(logger, MAX_RETRIES, BindTransportException.class);

    @Test
    public void testThatNettyCanBindToMultiplePorts() throws Exception {
@ -76,11 +64,14 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
                .put("transport.profiles.client1.port", ports[2])
                .build();

        startNettyTransport(settings);

        ThreadPool threadPool = new ThreadPool("tst");
        try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
            assertConnectionRefused(ports[0]);
            assertPortIsBound(ports[1]);
            assertPortIsBound(ports[2]);
        } finally {
            threadPool.shutdownNow();
        }
    }

    @Test
@ -93,10 +84,13 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
                .put("transport.profiles.client1.port", ports[1])
                .build();

        startNettyTransport(settings);

        ThreadPool threadPool = new ThreadPool("tst");
        try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
            assertPortIsBound(ports[0]);
            assertPortIsBound(ports[1]);
        } finally {
            threadPool.shutdownNow();
        }
    }

    @Test
@ -109,9 +103,12 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
                .put("transport.profiles.client1.whatever", "foo")
                .build();

        startNettyTransport(settings);

        ThreadPool threadPool = new ThreadPool("tst");
        try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
            assertPortIsBound(ports[0]);
        } finally {
            threadPool.shutdownNow();
        }
    }

    @Test
@ -125,11 +122,14 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
                .put("transport.profiles.default.port", ports[2])
                .build();

        startNettyTransport(settings);

        ThreadPool threadPool = new ThreadPool("tst");
        try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
            assertConnectionRefused(ports[0]);
            assertConnectionRefused(ports[1]);
            assertPortIsBound(ports[2]);
        } finally {
            threadPool.shutdownNow();
        }
    }

    @Test
@ -145,11 +145,14 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
                .put("transport.profiles.client1.port", ports[1])
                .build();

        startNettyTransport(settings);

        ThreadPool threadPool = new ThreadPool("tst");
        try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
            assertPortIsBound("127.0.0.1", ports[0]);
            assertPortIsBound(firstNonLoopbackAddress.getHostAddress(), ports[1]);
            assertConnectionRefused(ports[1]);
        } finally {
            threadPool.shutdownNow();
        }
    }

    private int[] getRandomPorts(int numberOfPorts) {
@ -166,14 +169,14 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
        return ports.toArray();
    }

    private void startNettyTransport(Settings settings) {
        threadPool = new ThreadPool("tst");
    private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) {
        BigArrays bigArrays = new MockBigArrays(settings, new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService());

        nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, Version.CURRENT);
        NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, Version.CURRENT);
        nettyTransport.start();

        assertThat(nettyTransport.lifecycleState(), is(Lifecycle.State.STARTED));
        return nettyTransport;
    }

    private void assertConnectionRefused(int port) throws Exception {
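assertPortIsBound and assertConnectionRefused are used throughout the tests above, but their bodies are not part of this diff; a hedged sketch of what plain-socket implementations of such helpers could look like (illustrative only, not the file's actual code, using java.net.Socket and org.junit.Assert.fail):

    private void assertPortIsBound(String host, int port) throws Exception {
        try (Socket socket = new Socket()) {
            // if nothing is listening, connect() throws and the test fails
            socket.connect(new InetSocketAddress(host, port), 500);
        }
    }

    private void assertConnectionRefused(int port) throws Exception {
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress("localhost", port), 500);
            fail("Expected connection to port [" + port + "] to be refused");
        } catch (IOException e) {
            // expected: nothing should be listening on this port
        }
    }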