Merge branch 'master' into feature/query-refactoring
commit f66987acf2

bin/plugin
@ -21,6 +21,43 @@ ES_HOME=`dirname "$SCRIPT"`/..

# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`

# Sets the default values for elasticsearch variables used in this script
if [ -z "$CONF_DIR" ]; then
  CONF_DIR="${packaging.plugin.default.config.dir}"

  if [ -z "$CONF_FILE" ]; then
    CONF_FILE="$CONF_DIR/elasticsearch.yml"
  fi
fi

if [ -z "$CONF_FILE" ]; then
  CONF_FILE="${packaging.plugin.default.config.file}"
fi

# The default env file is defined at building/packaging time.
# For a ${packaging.type} package, the value is "${packaging.env.file}".
ES_ENV_FILE="${packaging.env.file}"

# If an include is specified with the ES_INCLUDE environment variable, use it
if [ -n "$ES_INCLUDE" ]; then
  ES_ENV_FILE="$ES_INCLUDE"
fi

# Source the environment file
if [ -n "$ES_ENV_FILE" ]; then

  # If the ES_ENV_FILE is not found, try to resolve the path
  # against the ES_HOME directory
  if [ ! -f "$ES_ENV_FILE" ]; then
    ES_ENV_FILE="$ELASTIC_HOME/$ES_ENV_FILE"
  fi

  . "$ES_ENV_FILE"
  if [ $? -ne 0 ]; then
    echo "Unable to source environment file: $ES_ENV_FILE" >&2
    exit 1
  fi
fi

if [ -x "$JAVA_HOME/bin/java" ]; then
  JAVA=$JAVA_HOME/bin/java

@ -45,5 +82,25 @@ while [ $# -gt 0 ]; do
  shift
done

exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="$ES_HOME" $properties -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginManager $args
# check if properties already has a config file or config dir
if [ -e "$CONF_DIR" ]; then
  case "$properties" in
    *-Des.default.path.conf=*|*-Des.path.conf=*)
    ;;
    *)
    properties="$properties -Des.default.path.conf=$CONF_DIR"
    ;;
  esac
fi

if [ -e "$CONF_FILE" ]; then
  case "$properties" in
    *-Des.default.config=*|*-Des.config=*)
    ;;
    *)
    properties="$properties -Des.default.config=$CONF_FILE"
    ;;
  esac
fi

exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="$ES_HOME" $properties -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginManager $args
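As an illustrative sketch only (the paths are hypothetical, and the `${packaging.*}` placeholders above are substituted at build time), the variables this script resolves can be overridden from the environment before the plugin manager starts:

[source,sh]
--------------------------------------------------
# Both variables are read by the script before the JVM is launched;
# <arguments> stands for whatever plugin manager arguments are passed through.
CONF_DIR=/etc/elasticsearch ES_INCLUDE=/etc/default/elasticsearch bin/plugin <arguments>
--------------------------------------------------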
@ -1,16 +0,0 @@
Licensed to Elasticsearch under one or more contributor
license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright
ownership. Elasticsearch licenses this file to you under
the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
@ -1,47 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

@defaultMessage Convert to URI
java.net.URL#getPath()
java.net.URL#getFile()

@defaultMessage Use java.nio.file instead of java.io.File API
java.util.jar.JarFile
java.util.zip.ZipFile
java.io.File
java.io.FileInputStream
java.io.FileOutputStream
java.io.PrintStream#<init>(java.lang.String,java.lang.String)
java.io.PrintWriter#<init>(java.lang.String,java.lang.String)
java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()

@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter

java.nio.file.Paths @ Use PathUtils.get instead.
java.nio.file.FileSystems#getDefault() @ use PathUtils.getDefault instead.

@defaultMessage Specify a location for the temp file/directory instead.
java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])
@ -1,137 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

@defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by its name) which executor it is associated with

java.util.concurrent.Executors#newFixedThreadPool(int)
java.util.concurrent.Executors#newSingleThreadExecutor()
java.util.concurrent.Executors#newCachedThreadPool()
java.util.concurrent.Executors#newSingleThreadScheduledExecutor()
java.util.concurrent.Executors#newScheduledThreadPool(int)
java.util.concurrent.Executors#defaultThreadFactory()
java.util.concurrent.Executors#privilegedThreadFactory()

java.lang.Character#codePointBefore(char[],int) @ Implicit start offset is error-prone when the char[] is a buffer and the first chars are random chars
java.lang.Character#codePointAt(char[],int) @ Implicit end offset is error-prone when the char[] is a buffer and the last chars are random chars

@defaultMessage Collections.sort dumps data into an array, sorts the array and reinserts data into the list, one should rather use Lucene's CollectionUtil sort methods which sort in place

java.util.Collections#sort(java.util.List)
java.util.Collections#sort(java.util.List,java.util.Comparator)

java.io.StringReader#<init>(java.lang.String) @ Use FastStringReader instead

@defaultMessage Reference management is tricky, leave it to SearcherManager
org.apache.lucene.index.IndexReader#decRef()
org.apache.lucene.index.IndexReader#incRef()
org.apache.lucene.index.IndexReader#tryIncRef()

@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)

@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()
java.lang.Object#wait(long)
java.lang.Object#wait(long,int)
java.lang.Object#notify()
java.lang.Object#notifyAll()

@defaultMessage Beware of the behavior of this method on MIN_VALUE
java.lang.Math#abs(int)
java.lang.Math#abs(long)

@defaultMessage Please do not try to stop the world
java.lang.System#gc()

@defaultMessage Use Long.compare instead we are on Java7
com.google.common.primitives.Longs#compare(long,long)

@defaultMessage Use Channels.* methods to write to channels. Do not write directly.
java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)
java.nio.channels.FileChannel#write(java.nio.ByteBuffer, long)
java.nio.channels.GatheringByteChannel#write(java.nio.ByteBuffer[], int, int)
java.nio.channels.GatheringByteChannel#write(java.nio.ByteBuffer[])
java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
java.nio.channels.ScatteringByteChannel#read(java.nio.ByteBuffer[])
java.nio.channels.ScatteringByteChannel#read(java.nio.ByteBuffer[], int, int)
java.nio.channels.FileChannel#read(java.nio.ByteBuffer, long)

@defaultMessage Use Lucene.parseLenient instead it strips off minor version
org.apache.lucene.util.Version#parseLeniently(java.lang.String)

@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder.
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.impl.UnsafeChunkDecoder#<init>()
com.ning.compress.lzf.parallel.CompressTask
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance()
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkDecoderFactory#optimalInstance()
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.File)
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.FileDescriptor)
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.lang.String)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File, boolean)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.FileDescriptor)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String, boolean)
com.ning.compress.lzf.LZFEncoder#encode(byte[])
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int)
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int)
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFCompressingInputStream#<init>(java.io.InputStream)
com.ning.compress.lzf.LZFDecoder#fastDecoder()
com.ning.compress.lzf.LZFDecoder#decode(byte[])
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int)
com.ning.compress.lzf.LZFDecoder#decode(byte[], byte[])
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int, byte[])
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, boolean)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler, boolean)
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream)
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler)
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler, com.ning.compress.BufferRecycler)

@defaultMessage Spawns a new thread which is solely under lucenes control use ThreadPool#estimatedTimeInMillisCounter instead
org.apache.lucene.search.TimeLimitingCollector#getGlobalTimerThread()
org.apache.lucene.search.TimeLimitingCollector#getGlobalCounter()

@defaultMessage Don't interrupt threads use FutureUtils#cancel(Future<T>) instead
java.util.concurrent.Future#cancel(boolean)

@defaultMessage Don't try reading from paths that are not configured in Environment, resolve from Environment instead
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Use queries, not filters
org.apache.lucene.search.FilteredQuery#<init>(org.apache.lucene.search.Query, org.apache.lucene.search.Filter)
org.apache.lucene.search.FilteredQuery#<init>(org.apache.lucene.search.Query, org.apache.lucene.search.Filter, org.apache.lucene.search.FilteredQuery$FilterStrategy)
@ -1,20 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

com.carrotsearch.randomizedtesting.RandomizedTest#globalTempDir() @ Use newTempDirPath() instead
com.carrotsearch.randomizedtesting.annotations.Seed @ Don't commit hardcoded seeds

org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead
@ -1,13 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<additionalHeaders>
  <javadoc_style>
    <firstLine>/*</firstLine>
    <beforeEachLine> * </beforeEachLine>
    <endLine> */</endLine>
    <!--skipLine></skipLine-->
    <firstLineDetectionPattern>(\s|\t)*/\*.*$</firstLineDetectionPattern>
    <lastLineDetectionPattern>.*\*/(\s|\t)*$</lastLineDetectionPattern>
    <allowBlankLines>false</allowBlankLines>
    <isMultiline>true</isMultiline>
  </javadoc_style>
</additionalHeaders>
@ -1,20 +0,0 @@
<?xml version="1.0"?>
<ruleset name="Custom ruleset"
    xmlns="http://pmd.sourceforge.net/ruleset/2.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://pmd.sourceforge.net/ruleset/2.0.0 http://pmd.sourceforge.net/ruleset_2_0_0.xsd">
  <description>
    Default ruleset for elasticsearch server project
  </description>
  <rule ref="rulesets/java/basic.xml"/>
  <rule ref="rulesets/java/braces.xml"/>
  <rule ref="rulesets/java/clone.xml"/>
  <rule ref="rulesets/java/codesize.xml"/>
  <rule ref="rulesets/java/coupling.xml">
    <exclude name="LawOfDemeter" />
  </rule>
  <rule ref="rulesets/java/design.xml"/>
  <rule ref="rulesets/java/unnecessary.xml">
    <exclude name="UselessParentheses" />
  </rule>
</ruleset>
@ -90,7 +90,7 @@
A Java Object Search Engine Mapping (OSEM) for Elasticsearch

* https://github.com/twitter/storehaus[Twitter Storehaus]:
Thin asynchronous scala client for storehaus.
Thin asynchronous Scala client for Storehaus.

* https://doc.tiki.org/Elasticsearch[Tiki Wiki CMS Groupware]
Tiki has native support for Elasticsearch. This provides faster & better search (facets, etc), along with some Natural Language Processing features (ex.: More like this)
@ -50,7 +50,7 @@ failure:
[source,js]
--------------------------------------------------
indexR.success = {IndexResponse response ->
pritnln "Indexed $response.id into $response.index/$response.type"
println "Indexed $response.id into $response.index/$response.type"
}
indexR.failure = {Throwable t ->
println "Failed to index: $t.message"
@ -1,7 +1,7 @@
[[query-dsl-filters]]
== Query DSL - Filters

elasticsearch provides a full Java query dsl in a similar manner to the
elasticsearch provides a full Java query DSL in a similar manner to the
REST {ref}/query-dsl.html[Query DSL]. The factory for filter
builders is `FilterBuilders`.
@ -182,7 +182,7 @@ FilterBuilder filter = geoDistanceRangeFilter("pin.location") <1>
<5> include lower value means that `from` is `gt` when `false` or `gte` when `true`
<6> include upper value means that `to` is `lt` when `false` or `lte` when `true`
<7> optimize bounding box: `memory`, `indexed` or `none`
<8> distance computation mode: `GeoDistance.SLOPPY_ARC` (default), `GeoDistance.ARC` (slighly more precise but
<8> distance computation mode: `GeoDistance.SLOPPY_ARC` (default), `GeoDistance.ARC` (slightly more precise but
significantly slower) or `GeoDistance.PLANE` (faster, but inaccurate on long distances and close to the poles)

Note that you can cache the result using
@ -58,7 +58,7 @@ document to not be deleted.

Many times, the routing value is not known when deleting a document. For
those cases, when specifying the `_routing` mapping as `required`, and
no routing value is specified, the delete will be broadcasted
no routing value is specified, the delete will be broadcast
automatically to all shards.

[float]
@ -79,14 +79,18 @@ omit :
each term in this field)

[float]
==== Distributed frequencies coming[2.0]
==== Distributed frequencies

coming[2.0]

Setting `dfs` to `true` (default is `false`) will return the term statistics
or the field statistics of the entire index, and not just at the shard. Use it
with caution as distributed frequencies can have a serious performance impact.
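A minimal sketch of such a request, reusing the `twitter`/`tweet` example used elsewhere on this page (the `text` field is assumed here):

[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/twitter/tweet/1/_termvectors?pretty=true' -d '{
    "fields" : ["text"],
    "dfs" : true,
    "term_statistics" : true,
    "field_statistics" : true
}'
--------------------------------------------------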

[float]
==== Terms Filtering coming[2.0]
==== Terms Filtering

coming[2.0]

With the parameter `filter`, the terms returned could also be filtered based
on their tf-idf scores. This could be useful in order find out a good

@ -124,8 +128,8 @@ whereas the absolute numbers have no meaning in this context. By default,
when requesting term vectors of artificial documents, a shard to get the statistics
from is randomly selected. Use `routing` only to hit a particular shard.

[float]
=== Example 1
.Returning stored term vectors
==================================================

First, we create an index that stores term vectors, payloads etc. :

@ -263,9 +267,10 @@ Response:
}
}
--------------------------------------------------
==================================================

[float]
=== Example 2
.Generating term vectors on the fly
==================================================

Term vectors which are not explicitly stored in the index are automatically
computed on the fly. The following request returns all information and statistics for the

@ -282,10 +287,12 @@ curl -XGET 'http://localhost:9200/twitter/tweet/1/_termvectors?pretty=true' -d '
"field_statistics" : true
}'
--------------------------------------------------
==================================================

[float]
[[docs-termvectors-artificial-doc]]
=== Example 3
[example]
.Artificial documents
--

Term vectors can also be generated for artificial documents,
that is for documents not present in the index. The syntax is similar to the

@ -293,11 +300,8 @@ that is for documents not present in the index. The syntax is similar to the
return the same results as in example 1. The mapping used is determined by the
`index` and `type`.

[WARNING]
======
If dynamic mapping is turned on (default), the document fields not in the original
mapping will be dynamically created.
======
*If dynamic mapping is turned on (default), the document fields not in the original
mapping will be dynamically created.*

[source,js]
--------------------------------------------------

@ -308,10 +312,11 @@ curl -XGET 'http://localhost:9200/twitter/tweet/_termvectors' -d '{
}
}'
--------------------------------------------------
--

[float]
[[docs-termvectors-per-field-analyzer]]
=== Example 4
.Per-field analyzer
==================================================

Additionally, a different analyzer than the one at the field may be provided
by using the `per_field_analyzer` parameter. This is useful in order to

@ -365,10 +370,11 @@ Response:
}
}
--------------------------------------------------
==================================================

[float]
[[docs-termvectors-terms-filtering]]
=== Example 5
.Terms filtering
==================================================

Finally, the terms returned could be filtered based on their tf-idf scores. In
the example below we obtain the three most "interesting" keywords from the

@ -437,3 +443,4 @@ Response:
}
}
--------------------------------------------------
==================================================
@ -1,5 +1,5 @@
[[elasticsearch-reference]]
= Reference
= Elasticsearch Reference

:version: 1.5.2
:branch: 1.5

@ -40,5 +40,8 @@ include::testing.asciidoc[]

include::glossary.asciidoc[]

include::redirects.asciidoc[]

@ -1,32 +1,32 @@
[[indices-seal]]
== Seal

The seal API allows to flush one or more indices and adds a marker to
primaries and replicas if there are no pending write operations. The seal
marker is used during recovery after a node restarts. If a replica is
allocated on a node which already has a shard copy with the same seal as the
primary then no files will be copied during recovery. Sealing is a best effort
operation. If write operations are ongoing while the sealing is in progress
then writing the seal might fail on some copies.
The seal API flushes and adds a "seal" marker to the shards of one or more
indices. The seal is used during recovery or restarts to skip the first and
most costly phase of the process if all copies of the shard have the same seal.
No segment files need to be copied and the transaction log replay phase of the
recovery can start immediately which makes recovery much faster.

A seal marks a point in time snapshot (a low level Lucene commit). This mark
can be used to decide if the initial, rather resource heavy, recovery phase
where segments or event the entire lucene index is copied over the network can
be skipped. If the indices on both sides of the recovery have the same seal no
segment files need to be copied and transaction log replay can start
immediately. The seal breaks as soon as the shard issues a new lucene commit,
uncommitted operations in the transaction log do not break the seal until they
are committed.
There are two important points about seals:
1. They are best effort in that if there are any outstanding write operations
while the seal operation is being performed then the shards which those writes
target won't be sealed but all others will be. See below for more.
2. The seal breaks as soon as the shard issues a new lucene commit. Uncommitted
operations in the transaction log do not break the seal. That is because a seal
marks a point in time snapshot of the segments, a low level lucene commit.
Practically that means that every write operation on the index will remove the
seal.

[source,js]
[source,bash]
--------------------------------------------------
curl -XPOST 'http://localhost:9200/twitter/_seal'
$ curl -XPOST 'http://localhost:9200/twitter/_seal'
--------------------------------------------------

The response contains details about for which shards a seal was written and
the reason in case of failure.
The response contains details about which shards wrote the seal and the reason
in case they failed to write the seal.

Response in case all copies of a shard successfully wrote the seal:
Here is what it looks like when all copies single shard index successfully
wrote the seal:

[source,js]
--------------------------------------------------

@ -44,7 +44,7 @@ Response in case all copies of a shard successfully wrote the seal:
--------------------------------------------------


Response in case some copies of a shard failed:
Here is what it looks like when one copy fails:

[source,js]
--------------------------------------------------

@ -62,7 +62,7 @@ Response in case some copies of a shard failed:
--------------------------------------------------


Response in case all copies of a shard failed:
Sometimes the failures can be shard wide and they'll look like this:

[source,js]
--------------------------------------------------
@ -477,22 +477,6 @@ binary type:

Set to `true` to store field values in a column-stride fashion.

`compress`::

Set to `true` to compress the stored binary value.

`compress_threshold`::

Compression will only be applied to stored binary fields that are greater
than this size. Defaults to `-1`

NOTE: Enabling compression on stored binary fields only makes sense on large
and highly-compressible values. Otherwise per-field compression is usually not
worth doing as the space savings do not compensate for the overhead of the
compression format. Normally, you should not configure any compression and
just rely on the block compression of stored fields (which is enabled by
default and can't be disabled).

[float]
[[fielddata-filters]]
==== Fielddata filters
@ -178,6 +178,7 @@ A `RoutingMissingException` is now thrown instead.
* The setting `index.mapping.allow_type_wrapper` has been removed. Documents should always be sent without the type as the root element.
* The delete mappings API has been removed. Mapping types can no longer be deleted.
* The `ignore_conflicts` option of the put mappings API has been removed. Conflicts can't be ignored anymore.
* The `binary` field does not support the `compress` and `compress_threshold` options anymore.

==== Removed type prefix on field names in queries
Types can no longer be specified on fields within queries. Instead, specify type restrictions in the search request.
@ -14,6 +14,8 @@ include::constant-score-query.asciidoc[]

include::dis-max-query.asciidoc[]

include::exists-query.asciidoc[]

include::filtered-query.asciidoc[]

include::function-score-query.asciidoc[]

@ -44,6 +46,8 @@ include::limit-query.asciidoc[]

include::match-all-query.asciidoc[]

include::missing-query.asciidoc[]

include::mlt-query.asciidoc[]

include::nested-query.asciidoc[]
@ -17,10 +17,11 @@ Note, `message` is the name of a field, you can substitute the name of
any field (including `_all`) instead.

[float]
==== Types of Match Queries
=== Types of Match Queries

[float]
===== boolean
[[query-dsl-match-query-boolean]]
==== boolean

The default `match` query is of type `boolean`. It means that the text
provided is analyzed and the analysis process constructs a boolean query

@ -34,6 +35,14 @@ The `analyzer` can be set to control which analyzer will perform the
analysis process on the text. It defaults to the field explicit mapping
definition, or the default search analyzer.

The `lenient` parameter can be set to `true` to ignore exceptions caused by
data-type mismatches, such as trying to query a numeric field with a text
query string. Defaults to `false`.
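A minimal sketch (the field name and query text are only placeholders) of a `match` query with `lenient` enabled:

[source,js]
--------------------------------------------------
{
    "match" : {
        "message" : {
            "query" : "this is a test",
            "lenient" : true
        }
    }
}
--------------------------------------------------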

[[query-dsl-match-query-fuzziness]]
[float]
===== Fuzziness

`fuzziness` allows _fuzzy matching_ based on the type of field being queried.
See <<fuzziness>> for allowed settings.
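For example, a sketch of a fuzzy `match` query (field name and value are placeholders; see <<fuzziness>> for the accepted forms of the setting):

[source,js]
--------------------------------------------------
{
    "match" : {
        "message" : {
            "query" : "this is a test",
            "fuzziness" : "AUTO"
        }
    }
}
--------------------------------------------------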

@ -59,7 +68,9 @@ change in structure, `message` is the field name):
}
--------------------------------------------------

.zero_terms_query
[[query-dsl-match-query-zero]]
[float]
===== Zero terms query
If the analyzer used removes all tokens in a query like a `stop` filter
does, the default behavior is to match no documents at all. In order to
change that the `zero_terms_query` option can be used, which accepts

@ -78,7 +89,10 @@ change that the `zero_terms_query` option can be used, which accepts
}
--------------------------------------------------

.cutoff_frequency
[[query-dsl-match-query-cutoff]]
[float]
===== Cutoff frequency

The match query supports a `cutoff_frequency` that allows
specifying an absolute or relative document frequency where high
frequency terms are moved into an optional subquery and are only scored

@ -117,8 +131,9 @@ IMPORTANT: The `cutoff_frequency` option operates on a per-shard-level. This mea
that when trying it out on test indexes with low document numbers you
should follow the advice in {defguide}/relevance-is-broken.html[Relevance is broken].

[[query-dsl-match-query-phrase]]
[float]
===== phrase
==== phrase

The `match_phrase` query analyzes the text and creates a `phrase` query
out of the analyzed text. For example:

@ -167,6 +182,7 @@ definition, or the default search analyzer, for example:
--------------------------------------------------

[float]
[[query-dsl-match-query-phrase-prefix]]
===== match_phrase_prefix

The `match_phrase_prefix` is the same as `match_phrase`, except that it

@ -213,8 +229,8 @@ For example:
}
--------------------------------------------------

[float]
==== Comparison to query_string / field
.Comparison to query_string / field
**************************************************

The match family of queries does not go through a "query parsing"
process. It does not support field name prefixes, wildcard characters,

@ -225,8 +241,4 @@ usually what a text search box does). Also, the `phrase_prefix` type can
provide a great "as you type" behavior to automatically load search
results.

[float]
==== Other options

* `lenient` - If set to true will cause format based failures (like
providing text to a numeric field) to be ignored. Defaults to false.
**************************************************
@ -0,0 +1,380 @@
["appendix",role="exclude",id="redirects"]
= Deleted pages

The following pages have moved or been deleted.

[role="exclude",id="cluster-nodes-shutdown"]
=== Nodes shutdown

The `_shutdown` API has been removed. Instead, setup Elasticsearch to run as
a service (see <<setup-service>> or <<setup-service-win>>) or use the `-p`
command line option to <<setup-installation-pid,write the PID to a file>>.

[role="exclude",id="docs-bulk-udp"]
=== Bulk UDP API

The Bulk UDP services has been removed. Use the standard <<docs-bulk>> instead.

[role="exclude",id="indices-delete-mapping"]
=== Delete Mapping

It is no longer possible to delete the mapping for a type. Instead you should
<<indices-delete-index,delete the index>> and recreate it with the new mappings.

[role="exclude",id="indices-status"]
=== Index Status

The index `_status` API has been replaced with the <<indices-stats>> and
<<indices-recovery>> APIs.

[role="exclude",id="mapping-analyzer-field"]
=== `_analyzer`

The `_analyzer` field in type mappings is no longer supported and will be
automatically removed from mappings when upgrading to 2.x.

[role="exclude",id="mapping-boost-field"]
=== `_boost`

The `_boost` field in type mappings is no longer supported and will be
automatically removed from mappings when upgrading to 2.x.

[role="exclude",id="mapping-conf-mappings"]
=== Config mappings

It is no longer possible to specify mappings in files in the `config`
directory. Instead, mappings should be created using the API with:

* <<indices-create-index>>
* <<indices-put-mapping>>
* <<indices-templates>>

[role="exclude",id="modules-memcached"]
=== memcached

The `memcached` transport is no longer supported. Instead use the REST
interface over <<modules-http,HTTP>> or the
https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/index.html[Java API].

[role="exclude",id="modules-thrift"]
=== Thrift

The `thrift` transport is no longer supported. Instead use the REST
interface over <<modules-http,HTTP>> or the
https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/index.html[Java API].

// QUERY DSL

[role="exclude",id="query-dsl-queries"]
=== Queries

Queries and filters have been merged. Any query clause can now be used as a query
in ``query context'' and as a filter in ``filter context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-filters"]
=== Filters

Queries and filters have been merged. Any query clause can now be used as a query
in ``query context'' and as a filter in ``filter context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-and-filter"]
=== And Filter

The `and` filter has been replaced by the <<query-dsl-and-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-or-filter"]
=== Or Filter

The `or` filter has been replaced by the <<query-dsl-or-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-not-filter"]
=== Not Filter

The `not` filter has been replaced by the <<query-dsl-not-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-bool-filter"]
=== Bool Filter

The `bool` filter has been replaced by the <<query-dsl-bool-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-exists-filter"]
=== Exists Filter

The `exists` filter has been replaced by the <<query-dsl-exists-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-missing-filter"]
=== Missing Filter

The `missing` filter has been replaced by the <<query-dsl-missing-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-geo-bounding-box-filter"]
=== Geo Bounding Box Filter

The `geo_bounding_box` filter has been replaced by the <<query-dsl-geo-bounding-box-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-geo-distance-filter"]
=== Geo Distance Filter

The `geo_distance` filter has been replaced by the <<query-dsl-geo-distance-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-geo-distance-range-filter"]
=== Geo Distance Range Filter

The `geo_distance_range` filter has been replaced by the <<query-dsl-geo-distance-range-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-geo-polygon-filter"]
=== Geo Polygon Filter

The `geo_polygon` filter has been replaced by the <<query-dsl-geo-polygon-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-geo-shape-filter"]
=== Geo Shape Filter

The `geo_shape` filter has been replaced by the <<query-dsl-geo-shape-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-geohash-cell-filter"]
=== Geohash Cell Filter

The `geohash_cell` filter has been replaced by the <<query-dsl-geohash-cell-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-has-child-filter"]
=== Has Child Filter

The `has_child` filter has been replaced by the <<query-dsl-has-child-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-has-parent-filter"]
=== Has Parent Filter

The `has_parent` filter has been replaced by the <<query-dsl-has-parent-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-top-children-query"]
=== Top Children Query

The `top_children` query has been removed. Use the <<query-dsl-has-child-query>> instead.

[role="exclude",id="query-dsl-ids-filter"]
=== IDs Filter

The `ids` filter has been replaced by the <<query-dsl-ids-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-indices-filter"]
=== Indices Filter

The `indices` filter has been replaced by the <<query-dsl-indices-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-limit-filter"]
=== Limit Filter

The `limit` filter has been replaced by the <<query-dsl-limit-query>>.
It behaves as a query in ``query context'' and as a filter in ``filter
context'' (see <<query-dsl>>).

[role="exclude",id="query-dsl-match-all-filter"]
=== Match All Filter

The `match_all` filter has been replaced by the <<query-dsl-match-all-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-nested-filter"]
=== Nested Filter

The `nested` filter has been replaced by the <<query-dsl-nested-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-prefix-filter"]
=== Prefix Filter

The `prefix` filter has been replaced by the <<query-dsl-prefix-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-query-filter"]
=== Query Filter

The `query` filter has been removed as queries and filters have been merged (see
<<query-dsl>>).

[role="exclude",id="query-dsl-range-filter"]
=== Range Filter

The `range` filter has been replaced by the <<query-dsl-range-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-regexp-filter"]
=== Regexp Filter

The `regexp` filter has been replaced by the <<query-dsl-regexp-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-script-filter"]
=== Script Filter

The `script` filter has been replaced by the <<query-dsl-script-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-term-filter"]
=== Term Filter

The `term` filter has been replaced by the <<query-dsl-term-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-terms-filter"]
=== Terms Filter

The `terms` filter has been replaced by the <<query-dsl-terms-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-type-filter"]
=== Type Filter

The `type` filter has been replaced by the <<query-dsl-type-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-flt-query"]
=== Fuzzy Like This Query

The `fuzzy_like_this` or `flt` query has been removed. Instead use
the <<query-dsl-match-query-fuzziness,`fuzziness`>> parameter with the
<<query-dsl-match-query,`match` query>> or the <<query-dsl-mlt-query>>.

[role="exclude",id="query-dsl-flt-field-query"]
=== Fuzzy Like This Field Query

The `fuzzy_like_this_field` or `flt_field` query has been removed. Instead use
the <<query-dsl-match-query-fuzziness,`fuzziness`>> parameter with the
<<query-dsl-match-query,`match` query>> or the <<query-dsl-mlt-query>>.

[role="exclude",id="search-more-like-this"]
=== More Like This API

The More Like This API has been removed. Instead, use the <<query-dsl-mlt-query>>.

// FACETS

[role="exclude",id="search-facets"]
=== Facets

Faceted search refers to a way to explore large amounts of data by displaying
summaries about various partitions of the data and later allowing to narrow
the navigation to a specific partition.

In Elasticsearch, `facets` are also the name of a feature that allowed to
compute these summaries. `facets` have been replaced by
<<search-aggregations, aggregations>> in Elasticsearch 1.0, which are a superset
of facets.

[role="exclude",id="search-facets-filter-facet"]
=== Filter Facet

Facets have been removed. Use the
<<search-aggregations-bucket-filter-aggregation,`filter` aggregation>> or
<<search-aggregations-bucket-filters-aggregation,`filters` aggregation>> instead.

[role="exclude",id="search-facets-query-facet"]
=== Query Facet

Facets have been removed. Use the
<<search-aggregations-bucket-filter-aggregation,`filter` aggregation>> or
<<search-aggregations-bucket-filters-aggregation,`filters` aggregation>> instead.

[role="exclude",id="search-facets-geo-distance-facet"]
=== Geo Distance Facet

Facets have been removed. Use the
<<search-aggregations-bucket-geodistance-aggregation,`geo_distance` aggregation>> instead.

[role="exclude",id="search-facets-histogram-facet"]
=== Histogram Facet

Facets have been removed. Use the
<<search-aggregations-bucket-histogram-aggregation,`histogram` aggregation>> instead.

[role="exclude",id="search-facets-date-histogram-facet"]
=== Date Histogram Facet

Facets have been removed. Use the
<<search-aggregations-bucket-datehistogram-aggregation,`date_histogram` aggregation>> instead.

[role="exclude",id="search-facets-range-facet"]
=== Range Facet

Facets have been removed. Use the
<<search-aggregations-bucket-range-aggregation,`range` aggregation>> instead.

[role="exclude",id="search-facets-terms-facet"]
=== Terms Facet

Facets have been removed. Use the
<<search-aggregations-bucket-terms-aggregation,`terms` aggregation>> instead.

[role="exclude",id="search-facets-terms-statistical-facet"]
=== Terms Stats Facet

Facets have been removed. Use the
<<search-aggregations-bucket-terms-aggregation,`terms` aggregation>>
with the <<search-aggregations-metrics-stats-aggregation,`stats` aggregation>>
or the <<search-aggregations-metrics-extendedstats-aggregation,`extended_stats` aggregation>>
instead.

[role="exclude",id="search-facets-statistical-facet"]
=== Statistical Facet

Facets have been removed. Use the
<<search-aggregations-metrics-stats-aggregation,`stats` aggregation>>
or the <<search-aggregations-metrics-extendedstats-aggregation,`extended_stats` aggregation>> instead.

[role="exclude",id="search-facets-migrating-to-aggs"]
=== Migrating from facets to aggregations

Facets have been removed. Use <<search-aggregations>> instead.

@ -11,7 +11,7 @@ exception of the <<search-explain>> endpoints.
[[search-routing]]
== Routing

When executing a search, it will be broadcasted to all the index/indices
When executing a search, it will be broadcast to all the index/indices
shards (round robin between replicas). Which shards will be searched on
can be controlled by providing the `routing` parameter. For example,
when indexing tweets, the routing value can be the user name:

@ -85,8 +85,6 @@ include::search/search-template.asciidoc[]

include::search/search-shards.asciidoc[]

include::search/facets.asciidoc[]

include::search/suggesters.asciidoc[]

include::search/multi-search.asciidoc[]
@ -1,11 +0,0 @@
[[search-facets]]
== Facets

Faceted search refers to a way to explore large amounts of data by displaying
summaries about various partitions of the data and later allowing to narrow
the navigation to a specific partition.

In Elasticsearch, `facets` are also the name of a feature that allowed to
compute these summaries. `facets` have been replaced by
<<search-aggregations, aggregations>> in Elasticsearch 1.0, which are a superset
of facets.
@ -3,11 +3,14 @@

experimental[]

The field stats api allows one to find statistical properties of a field without executing a search, but
looking up measurements that are natively available in the Lucene index. This can be useful to explore a dataset which
you don't know much about. For example, this allows creating a histogram aggregation with meaningful intervals.
The field stats api allows one to find statistical properties of a field
without executing a search, but looking up measurements that are natively
available in the Lucene index. This can be useful to explore a dataset which
you don't know much about. For example, this allows creating a histogram
aggregation with meaningful intervals based on the min/max range of values.

The field stats api by defaults executes on all indices, but can execute on specific indices too.
The field stats api by defaults executes on all indices, but can execute on
specific indices too.

All indices:

@ -26,15 +29,11 @@ curl -XGET "http://localhost:9200/index1,index2/_field_stats?fields=rating"
Supported request options:

[horizontal]
`fields`::

A list of fields to compute stats for.

`level`::

Defines if field stats should be returned on a per index level or on a cluster
wide level. Valid values are `indices` and `cluster`. Defaults to `cluster`.
`fields`:: A list of fields to compute stats for.
`level`:: Defines if field stats should be returned on a per index level or on a
cluster wide level. Valid values are `indices` and `cluster` (default).

[float]
=== Field statistics

The field stats api is supported on string based, number based and date based fields and can return the following statistics per field:

@ -57,13 +56,13 @@ is a derived statistic and is based on the `max_doc` and `doc_count`.
`sum_doc_freq`::

The sum of each term's document frequency in this field, or -1 if this
measurement isn't available on one or more shards. Document frequency is the
number of documents containing a particular term.
measurement isn't available on one or more shards.
Document frequency is the number of documents containing a particular term.

`sum_total_term_freq`::

The sum of the term frequencies of all terms in this field across all
documents, or `-1` if this measurement isn't available on one or more shards.
documents, or -1 if this measurement isn't available on one or more shards.
Term frequency is the total number of occurrences of a term in a particular
document and field.

@ -75,18 +74,19 @@ The lowest value in the field represented in a displayable form.

The highest value in the field represented in a displayable form.

NOTE: For all the mentioned statistics, documents marked as deleted aren't taken into account. The documents marked
as deleted are are only taken into account when the segments these documents reside on are merged away.
NOTE: Documents marked as deleted (but not yet removed by the merge process)
still affect all the mentioned statistics.

[float]
=== Example

.Cluster level field statistics
==================================================

[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creation_date,display_name"
GET /_field_stats?fields=rating,answer_count,creation_date,display_name
--------------------------------------------------

[source,js]
[source,json]
--------------------------------------------------
{
"_shards": {

@ -140,12 +140,14 @@ curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creati
--------------------------------------------------

<1> The `_all` key indicates that it contains the field stats of all indices in the cluster.
==================================================

With level set to `indices`:
.Indices level field statistics
==================================================

[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creation_date,display_name&level=indices"
GET /_field_stats?fields=rating,answer_count,creation_date,display_name&level=indices
--------------------------------------------------

[source,js]

@ -202,3 +204,5 @@ curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creati
--------------------------------------------------

<1> The `stack` key means it contains all field stats for the `stack` index.

==================================================
@ -210,8 +210,8 @@ comma separated string, in that case the request can be be executed on more than

* `ignore_unavailable` - Controls if missing concrete indices should silently be ignored. Same as is in the search API.
* `percolate_format` - If `ids` is specified then the matches array in the percolate response will contain a string
array of the matching ids instead of an array of objects. This can be useful to reduce the amount of data being send
back to the client. Obviously if there are to percolator queries with same id from different indices there is no way
the find out which percolator query belongs to what index. Any other value to `percolate_format` will be ignored.
back to the client. Obviously if there are two percolator queries with same id from different indices there is no way
to find out which percolator query belongs to what index. Any other value to `percolate_format` will be ignored.
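For example, a request asking for the compact format could look like the sketch below (the index and type names are placeholders, not taken from the original page):

[source,js]
--------------------------------------------------
curl -XGET "localhost:9200/my-index/my-type/_percolate?percolate_format=ids" -d '{
    "doc" : {
        "message" : "A new bonsai tree in the office"
    }
}'
--------------------------------------------------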

.Additional request body options
* `filter` - Reduces the number queries to execute during percolating. Only the percolator queries that match with the
@ -14,7 +14,7 @@ See <<setup-repositories>>.

[float]
== Installation

After link:/download[downloading] the latest release and extracting it,
After link:/downloads/elasticsearch[downloading] the latest release and extracting it,
*elasticsearch* can be started using:

[source,sh]

@ -22,7 +22,12 @@ After link:/download[downloading] the latest release and extracting it,

$ bin/elasticsearch
--------------------------------------------------

Under *nix system, the command will start the process in the foreground.
On *nix systems, the command will start the process in the foreground.

[[setup-installation-daemon]]
[float]
=== Running as a daemon

To run it in the background, add the `-d` switch to it:

[source,sh]

@ -30,6 +35,24 @@ To run it in the background, add the `-d` switch to it:

$ bin/elasticsearch -d
--------------------------------------------------

[[setup-installation-pid]]
[float]
=== PID

The Elasticsearch process can write its PID to a specified file on startup,
making it easy to shut down the process later on:

[source,sh]
--------------------------------------------------
$ bin/elasticsearch -d -p pid <1>
$ kill `cat pid` <2>
--------------------------------------------------
<1> The PID is written to a file called `pid`.
<2> The `kill` command sends a `TERM` signal to the PID stored in the `pid` file.
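The two switches combine naturally; a minimal sketch of a typical invocation (the pid file location is only an example, not from the original page):

[source,sh]
--------------------------------------------------
$ bin/elasticsearch -d -p /var/run/elasticsearch/es.pid
--------------------------------------------------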

NOTE: The startup scripts provided for <<setup-service,Linux>> and <<setup-service-win,Windows>>
take care of starting and stopping the Elasticsearch process for you.

.*NIX
*************************************************************************
There are added features when using the `elasticsearch` shell script.
@ -23,7 +23,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;

/**

@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;

@ -209,7 +208,7 @@ public class NodeInfo extends NodeOperationResponse {
serviceAttributes = builder.build();
}
if (in.readBoolean()) {
settings = ImmutableSettings.readSettingsFromStream(in);
settings = Settings.readSettingsFromStream(in);
}
if (in.readBoolean()) {
os = OsInfo.readOsInfo(in);

@ -256,7 +255,7 @@ public class NodeInfo extends NodeOperationResponse {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
ImmutableSettings.writeSettingsToStream(settings, out);
Settings.writeSettingsToStream(settings, out);
}
if (os == null) {
out.writeBoolean(false);

@ -24,7 +24,6 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.node.info;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.NodeOperationRequest;
import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;

@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.node.liveness;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;

@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.node.stats;

import com.google.common.collect.Lists;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.NodeOperationRequest;
import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.repositories.delete;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;

@ -60,7 +59,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeOperatio

@Override
protected ClusterBlockException checkBlock(DeleteRepositoryRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, "");
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}

@Override
@ -24,7 +24,7 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.util.Iterator;

@ -63,7 +63,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable<
repositoryListBuilder.add(new RepositoryMetaData(
in.readString(),
in.readString(),
ImmutableSettings.readSettingsFromStream(in))
Settings.readSettingsFromStream(in))
);
}
repositories = repositoryListBuilder.build();

@ -76,7 +76,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable<
for (RepositoryMetaData repository : repositories) {
out.writeString(repository.name());
out.writeString(repository.type());
ImmutableSettings.writeSettingsToStream(repository.settings(), out);
Settings.writeSettingsToStream(repository.settings(), out);
}
}

@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.repositories.get;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;

@ -60,7 +59,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera

@Override
protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, "");
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}

@Override

@ -26,7 +26,6 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@ -36,9 +35,9 @@ import java.io.IOException;
import java.util.Map;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;

/**
 * Register repository request.

@ -150,7 +149,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
 * @return this request
 */
public PutRepositoryRequest settings(String source) {
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
this.settings = Settings.settingsBuilder().loadFromSource(source).build();
return this;
}
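The hunks above all follow the same mechanical migration: `ImmutableSettings` is folded into `Settings`, so the builder, the stream helpers and the `EMPTY` constant are now reached through `Settings` directly. A minimal sketch of what a call site looks like after the change (the keys and values below are illustrative only, not taken from the patch):

import org.elasticsearch.common.settings.Settings;

public class SettingsMigrationSketch {
    public static void main(String[] args) {
        // Settings.settingsBuilder() replaces ImmutableSettings.settingsBuilder()
        Settings settings = Settings.settingsBuilder()
                .put("cluster.name", "demo")   // illustrative key/value, not from the patch
                .put("node.data", true)
                .build();

        System.out.println(settings.get("cluster.name"));

        // Settings.EMPTY replaces ImmutableSettings.EMPTY
        System.out.println(Settings.EMPTY.getAsMap().isEmpty());
    }
}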
@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.repositories.put;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.repositories.put;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;

@ -60,7 +59,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeOperationAc

@Override
protected ClusterBlockException checkBlock(PutRepositoryRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, "");
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}

@Override

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.repositories.verify;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;

@ -64,7 +63,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio

@Override
protected ClusterBlockException checkBlock(VerifyRepositoryRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, "");
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}

@Override

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.reroute;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;

@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@ -34,9 +33,9 @@ import java.io.IOException;
import java.util.Map;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;

/**
 * Request for an update cluster settings action

@ -86,7 +85,7 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
 */
public ClusterUpdateSettingsRequest transientSettings(String source) {
this.transientSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
this.transientSettings = Settings.settingsBuilder().loadFromSource(source).build();
return this;
}

@ -125,7 +124,7 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 * Sets the source containing the persistent settings to be updated. They will get applied cross restarts
 */
public ClusterUpdateSettingsRequest persistentSettings(String source) {
this.persistentSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
this.persistentSettings = Settings.settingsBuilder().loadFromSource(source).build();
return this;
}
@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.settings;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;

@ -36,8 +35,8 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
Settings persistentSettings;

ClusterUpdateSettingsResponse() {
this.persistentSettings = ImmutableSettings.EMPTY;
this.transientSettings = ImmutableSettings.EMPTY;
this.persistentSettings = Settings.EMPTY;
this.transientSettings = Settings.EMPTY;
}

ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) {

@ -49,8 +48,8 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
transientSettings = ImmutableSettings.readSettingsFromStream(in);
persistentSettings = ImmutableSettings.readSettingsFromStream(in);
transientSettings = Settings.readSettingsFromStream(in);
persistentSettings = Settings.readSettingsFromStream(in);
readAcknowledged(in);
}

@ -65,8 +64,8 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
ImmutableSettings.writeSettingsToStream(transientSettings, out);
ImmutableSettings.writeSettingsToStream(persistentSettings, out);
Settings.writeSettingsToStream(transientSettings, out);
Settings.writeSettingsToStream(persistentSettings, out);
writeAcknowledged(out);
}
}

@ -38,7 +38,6 @@ import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@ -87,8 +86,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe

@Override
protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) {
final ImmutableSettings.Builder transientUpdates = ImmutableSettings.settingsBuilder();
final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder();
final Settings.Builder transientUpdates = Settings.settingsBuilder();
final Settings.Builder persistentUpdates = Settings.settingsBuilder();

clusterService.submitStateUpdateTask("cluster_update_settings", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {

@ -178,7 +177,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe

@Override
public ClusterState execute(final ClusterState currentState) {
ImmutableSettings.Builder transientSettings = ImmutableSettings.settingsBuilder();
Settings.Builder transientSettings = Settings.settingsBuilder();
transientSettings.put(currentState.metaData().transientSettings());
for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {

@ -195,7 +194,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe
}
}

ImmutableSettings.Builder persistentSettings = ImmutableSettings.settingsBuilder();
Settings.Builder persistentSettings = Settings.settingsBuilder();
persistentSettings.put(currentState.metaData().persistentSettings());
for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.shards;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;

@ -28,7 +28,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@ -42,9 +41,9 @@ import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.Strings.EMPTY_ARRAY;
import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;

/**

@ -301,7 +300,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest<CreateSnap
 * @return this request
 */
public CreateSnapshotRequest settings(String source) {
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
this.settings = Settings.settingsBuilder().loadFromSource(source).build();
return this;
}

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.snapshots.create;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.snapshots.create;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;

@ -60,7 +59,12 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA

@Override
protected ClusterBlockException checkBlock(CreateSnapshotRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, "");
// We are writing to the cluster metadata and reading from indices - so we need to check both blocks
ClusterBlockException clusterBlockException = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
if (clusterBlockException != null) {
    return clusterBlockException;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices()));
}

@Override
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.snapshots.delete;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
@ -59,7 +58,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(DeleteSnapshotRequest request, ClusterState state) {
|
||||
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, "");
|
||||
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.admin.cluster.snapshots.get;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
@ -62,7 +61,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetSnapshotsRequest request, ClusterState state) {
|
||||
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, "");
|
||||
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -28,7 +28,6 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
|
@ -41,9 +40,9 @@ import java.util.Map;
|
|||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
import static org.elasticsearch.common.Strings.hasLength;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
|
||||
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
|
||||
|
||||
/**
|
||||
|
@ -340,7 +339,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
|
|||
* @return this request
|
||||
*/
|
||||
public RestoreSnapshotRequest settings(String source) {
|
||||
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
|
||||
this.settings = Settings.settingsBuilder().loadFromSource(source).build();
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -457,7 +456,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
|
|||
* Sets settings that should be added/changed in all restored indices
|
||||
*/
|
||||
public RestoreSnapshotRequest indexSettings(String source) {
|
||||
this.indexSettings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
|
||||
this.indexSettings = Settings.settingsBuilder().loadFromSource(source).build();
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.snapshots.restore;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
@ -62,11 +61,12 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeOperation
|
|||
protected ClusterBlockException checkBlock(RestoreSnapshotRequest request, ClusterState state) {
|
||||
// Restoring a snapshot might change the global state and create/change an index,
|
||||
// so we need to check for METADATA_WRITE and WRITE blocks
|
||||
ClusterBlockException blockException = state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, "");
|
||||
ClusterBlockException blockException = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
|
||||
if (blockException != null) {
|
||||
return blockException;
|
||||
}
|
||||
return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, "");
|
||||
return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.admin.cluster.state;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.stats;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||
|
|
|
@ -19,9 +19,7 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.tasks;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.admin.indices.alias;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.alias.exists;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.alias.get;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.ShardsIterator;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.analysis.*;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
|
@ -59,7 +58,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction
|
|||
private final IndicesService indicesService;
|
||||
private final IndicesAnalysisService indicesAnalysisService;
|
||||
|
||||
private static final Settings DEFAULT_SETTINGS = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
|
||||
private static final Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
|
||||
|
||||
@Inject
|
||||
public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.cache.clear;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
@ -34,7 +33,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.close;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DestructiveOperations;
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
|
|||
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.transport.TransportMessage;
|
||||
|
||||
|
@ -45,7 +44,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
|
|||
|
||||
private IndexMetaData.State state = IndexMetaData.State.OPEN;
|
||||
|
||||
private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
|
||||
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
|
||||
|
||||
private final Map<String, String> mappings = Maps.newHashMap();
|
||||
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.*;
|
||||
|
||||
|
@ -46,9 +45,9 @@ import java.util.Set;
|
|||
|
||||
import static com.google.common.collect.Maps.newHashMap;
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
|
||||
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
|
||||
|
||||
/**
|
||||
* A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}.
|
||||
|
@ -148,7 +147,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
|
|||
* A simplified version of settings that takes key value pairs settings.
|
||||
*/
|
||||
public CreateIndexRequest settings(Object... settings) {
|
||||
this.settings = ImmutableSettings.builder().put(settings).build();
|
||||
this.settings = Settings.builder().put(settings).build();
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -172,7 +171,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
|
|||
* The settings to create the index with (either json/yaml/properties format)
|
||||
*/
|
||||
public CreateIndexRequest settings(String source) {
|
||||
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
|
||||
this.settings = Settings.settingsBuilder().loadFromSource(source).build();
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.create;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.delete;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DestructiveOperations;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.exists.indices;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.exists.types;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.flush;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
|
|
@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
||||
|
@ -155,7 +154,7 @@ public class GetIndexResponse extends ActionResponse {
|
|||
ImmutableOpenMap.Builder<String, Settings> settingsMapBuilder = ImmutableOpenMap.builder();
|
||||
for (int i = 0; i < settingsSize; i++) {
|
||||
String key = in.readString();
|
||||
settingsMapBuilder.put(key, ImmutableSettings.readSettingsFromStream(in));
|
||||
settingsMapBuilder.put(key, Settings.readSettingsFromStream(in));
|
||||
}
|
||||
settings = settingsMapBuilder.build();
|
||||
}
|
||||
|
@ -195,7 +194,7 @@ public class GetIndexResponse extends ActionResponse {
|
|||
out.writeVInt(settings.size());
|
||||
for (ObjectObjectCursor<String, Settings> indexEntry : settings) {
|
||||
out.writeString(indexEntry.key);
|
||||
ImmutableSettings.writeSettingsToStream(indexEntry.value, out);
|
||||
Settings.writeSettingsToStream(indexEntry.value, out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.get;
|
|||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
|
|
|
@ -23,14 +23,12 @@ import com.google.common.collect.ImmutableMap;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.action.support.TransportAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.mapping.get;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.mapping.put;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.open;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DestructiveOperations;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.recovery;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.refresh;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
|
|
@ -24,23 +24,17 @@ import org.elasticsearch.action.support.ActionFilters;
|
|||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndicesLifecycle;
|
||||
import org.elasticsearch.indices.SyncedFlushService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.segments;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionResponse;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -61,7 +60,7 @@ public class GetSettingsResponse extends ActionResponse {
|
|||
int size = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, Settings> builder = ImmutableOpenMap.builder();
|
||||
for (int i = 0; i < size; i++) {
|
||||
builder.put(in.readString(), ImmutableSettings.readSettingsFromStream(in));
|
||||
builder.put(in.readString(), Settings.readSettingsFromStream(in));
|
||||
}
|
||||
indexToSettings = builder.build();
|
||||
}
|
||||
|
@ -72,7 +71,7 @@ public class GetSettingsResponse extends ActionResponse {
|
|||
out.writeVInt(indexToSettings.size());
|
||||
for (ObjectObjectCursor<String, Settings> cursor : indexToSettings) {
|
||||
out.writeString(cursor.key);
|
||||
ImmutableSettings.writeSettingsToStream(cursor.value, out);
|
||||
Settings.writeSettingsToStream(cursor.value, out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,9 +19,7 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.settings.get;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
|
@ -32,7 +30,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
|||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsFilter;
|
||||
import org.elasticsearch.common.util.CollectionUtils;
|
||||
|
@ -83,7 +80,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadOperation
|
|||
|
||||
Settings settings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), indexMetaData.settings());
|
||||
if (!CollectionUtils.isEmpty(request.names())) {
|
||||
ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder();
|
||||
Settings.Builder settingsBuilder = Settings.builder();
|
||||
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
|
||||
if (Regex.simpleMatch(request.names(), entry.getKey())) {
|
||||
settingsBuilder.put(entry.getKey(), entry.getValue());
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.settings.put;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.elasticsearch.action.support.IndicesOptions;
|
|||
import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
|
@ -36,9 +35,9 @@ import java.io.IOException;
|
|||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
|
||||
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
|
||||
|
||||
/**
|
||||
* Request for an update index settings action
|
||||
|
@ -124,7 +123,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
|
|||
* Sets the settings to be updated (either json/yaml/properties format)
|
||||
*/
|
||||
public UpdateSettingsRequest settings(String source) {
|
||||
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
|
||||
this.settings = Settings.settingsBuilder().loadFromSource(source).build();
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.settings.put;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.admin.indices.stats;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.template.delete;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
|
|
@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.template.get;
|
|||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import com.google.common.collect.Lists;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadOperationAction;
|
||||
|
|
|
@ -32,7 +32,6 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.*;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
|
@ -44,9 +43,9 @@ import java.util.Set;
|
|||
import static com.google.common.collect.Maps.newHashMap;
|
||||
import static com.google.common.collect.Sets.newHashSet;
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.writeSettingsToStream;
|
||||
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
|
||||
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
|
||||
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
|
||||
|
||||
/**
|
||||
* A request to create an index template.
|
||||
|
@ -159,7 +158,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest<PutIndex
|
|||
* The settings to create the index template with (either json/yaml/properties format).
|
||||
*/
|
||||
public PutIndexTemplateRequest settings(String source) {
|
||||
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
|
||||
this.settings = Settings.settingsBuilder().loadFromSource(source).build();
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.template.put;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.Alias;
|
||||
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.template.put;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.validate.query;
|
|||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
package org.elasticsearch.action.admin.indices.warmer.delete;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
|
||||
|
|
|
@@ -19,9 +19,7 @@

package org.elasticsearch.action.bulk;

import org.elasticsearch.Version;
-import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -32,7 +30,7 @@ import java.util.List;
/**
 *
 */
-public class BulkShardRequest extends ShardReplicationOperationRequest<BulkShardRequest> {
+public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {

    private int shardId;

@@ -54,7 +54,6 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -28,7 +28,7 @@ import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;

@@ -65,7 +65,7 @@ import java.util.Map;
/**
 * Performs the index operation.
 */
-public class TransportShardBulkAction extends TransportShardReplicationOperationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
+public class TransportShardBulkAction extends TransportReplicationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {

    private final static String OP_TYPE_UPDATE = "update";
    private final static String OP_TYPE_DELETE = "delete";

@@ -19,7 +19,6 @@

package org.elasticsearch.action.count;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;

@@ -22,7 +22,7 @@ package org.elasticsearch.action.delete;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
-import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
+import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -44,7 +44,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * @see org.elasticsearch.client.Client#delete(DeleteRequest)
 * @see org.elasticsearch.client.Requests#deleteRequest(String)
 */
-public class DeleteRequest extends ShardReplicationOperationRequest<DeleteRequest> implements DocumentRequest<DeleteRequest> {
+public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements DocumentRequest<DeleteRequest> {

    private String type;
    private String id;

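The DeleteRequest hunks above only swap the request's superclass from ShardReplicationOperationRequest to ReplicationRequest; typical client code that just constructs the request is unaffected. A minimal usage sketch, assuming the usual index/type/id constructor (the names below are made-up examples):

    DeleteRequest request = new DeleteRequest("my-index", "my-type", "1"); // delete document "1" of type "my-type" from "my-index"
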
@@ -19,7 +19,7 @@

package org.elasticsearch.action.delete;

-import org.elasticsearch.action.support.replication.ShardReplicationOperationRequestBuilder;
+import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.VersionType;

@@ -27,7 +27,7 @@ import org.elasticsearch.index.VersionType;
/**
 * A delete document action request builder.
 */
-public class DeleteRequestBuilder extends ShardReplicationOperationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {
+public class DeleteRequestBuilder extends ReplicationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {

    public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) {
        super(client, action, new DeleteRequest());

@@ -19,17 +19,15 @@

package org.elasticsearch.action.delete;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
-import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction;
+import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;

@@ -49,12 +47,10 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

/**
 * Performs the delete operation.
 */
-public class TransportDeleteAction extends TransportShardReplicationOperationAction<DeleteRequest, DeleteRequest, DeleteResponse> {
+public class TransportDeleteAction extends TransportReplicationAction<DeleteRequest, DeleteRequest, DeleteResponse> {

    private final AutoCreateIndex autoCreateIndex;
    private final TransportCreateIndexAction createIndexAction;

@@ -20,7 +20,6 @@
package org.elasticsearch.action.exists;


import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;

@@ -22,7 +22,6 @@ package org.elasticsearch.action.fieldstats;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;

@@ -40,7 +39,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMappers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

Some files were not shown because too many files have changed in this diff.